hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
b3d2a071850c4bd832a1c5c5923aa24f8bddbf4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#define SIZE 32
/* Autores:
*
* Antonio J. Cabrera
* Paul Gazel-Anthoine
*/
// Structs (H)
/* On-disk BMP file header WITHOUT the leading 2-byte "BM" magic; LoadBMP
   reads the magic separately so this struct starts on a 4-byte-aligned
   field and contains no padding (sizeof == 12). */
typedef struct bmpFileHeader {
/* the 2 identification bytes are deliberately not part of this struct */
uint32_t size; /* total file size in bytes */
uint16_t resv1; /* reserved */
uint16_t resv2; /* reserved */
uint32_t offset; /* offset from the start of the file to the pixel data */
} bmpFileHeader;
/* BMP info header (BITMAPINFOHEADER-style layout). */
typedef struct bmpInfoHeader {
uint32_t headersize; /* size of this header in bytes */
uint32_t width; /* width in pixels */
uint32_t height; /* height in pixels */
uint16_t planes; /* color planes (always 1) */
uint16_t bpp; /* bits per pixel */
uint32_t compress; /* compression type */
uint32_t imgsize; /* size of the pixel data in bytes */
uint32_t bpmx; /* horizontal resolution in pixels per metre */
uint32_t bpmy; /* vertical resolution in pixels per metre */
uint32_t colors; /* colors used in the palette */
uint32_t imxtcolors; /* important colors; 0 = all */
} bmpInfoHeader;
// Rutinas BMP (C)
/*
 * Loads a BMP image from disk.
 * Validates the "BM" magic (read separately from the headers so struct
 * alignment cannot corrupt the layout), fills *bInfoHeader, and returns a
 * malloc'd buffer with the raw pixel data. The caller owns the buffer.
 * Returns NULL on any I/O or format error (a message is printed).
 */
unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader) {
    FILE *f;
    bmpFileHeader header;   /* file header (without the 2 magic bytes) */
    unsigned char *imgdata; /* pixel data */
    uint16_t type;          /* 2 identification bytes ("BM") */
    /* "rb": binary mode. The original "r" corrupts the data on platforms
       that translate line endings (e.g. Windows). */
    f = fopen(filename, "rb");
    if (!f) {
        printf("NO se puede abrir el fichero %s\n", filename);
        return NULL;
    }
    /* Read and validate the 2-byte magic, checking the read succeeded */
    if (fread(&type, sizeof(uint16_t), 1, f) != 1 || type != 0x4D42) {
        fclose(f);
        printf("%s NO es una imagen BMP\n", filename);
        return NULL;
    }
    /* Read file header and BMP info header, checking for truncation */
    if (fread(&header, sizeof(bmpFileHeader), 1, f) != 1 ||
        fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f) != 1) {
        fclose(f);
        printf("%s NO es una imagen BMP\n", filename);
        return NULL;
    }
    printf("File size: %u\n", header.size);
    printf("Reservado: %u\n", header.resv1);
    printf("Reservado: %u\n", header.resv2);
    printf("Offset: %u\n", header.offset);
    /* Some writers leave imgsize at 0: recompute it (rows padded to 4 bytes) */
    if (bInfoHeader->imgsize == 0)
        bInfoHeader->imgsize = ((bInfoHeader->width*3 +3) / 4) * 4 * bInfoHeader->height;
    imgdata = (unsigned char*) malloc(bInfoHeader->imgsize);
    if (imgdata == NULL) {
        printf("Fallo en el malloc, del fichero %s\n", filename);
        fclose(f);
        exit(1); /* was exit(0): a failure must not report success */
    }
    /* Seek to the pixel data; the offset counts from the start of the file */
    fseek(f, header.offset, SEEK_SET);
    /* Read the pixel data, checking for truncation */
    if (fread(imgdata, bInfoHeader->imgsize, 1, f) != 1) {
        free(imgdata);
        fclose(f);
        printf("%s NO es una imagen BMP\n", filename);
        return NULL;
    }
    fclose(f);
    return imgdata;
}
/*
 * Allocates and initializes a 24-bpp, uncompressed BMP info header.
 * ppp: resolution in pixels per inch (converted to pixels per metre).
 * Returns NULL if the allocation fails; the caller owns the returned memory.
 */
bmpInfoHeader *createInfoHeader(uint32_t width, uint32_t height, uint32_t ppp) {
    /* BUG FIX: the original stored malloc's result in a bool and then
       dereferenced the uninitialized InfoHeader pointer. */
    bmpInfoHeader *InfoHeader = (bmpInfoHeader*) malloc(sizeof(bmpInfoHeader));
    if (InfoHeader == NULL) return NULL;
    InfoHeader->headersize = sizeof(bmpInfoHeader);
    InfoHeader->width = width;
    InfoHeader->height = height;
    InfoHeader->planes = 1;
    InfoHeader->bpp = 24;
    InfoHeader->compress = 0;
    /* 3 bytes per pixel; every row is padded up to a multiple of 4 bytes */
    InfoHeader->imgsize = ((width*3 + 3) / 4) * 4 * height;
    /* inches -> metres: multiply by 100/2.54 */
    InfoHeader->bpmx = (unsigned) ((double)ppp*100/2.54);
    InfoHeader->bpmy = InfoHeader->bpmx; /* same vertical and horizontal resolution */
    InfoHeader->colors = 0;
    InfoHeader->imxtcolors = 0;
    return InfoHeader;
}
/*
 * Writes a BMP file: "BM" magic, file header, info header, pixel data.
 * imgdata must contain InfoHeader->imgsize bytes. Does nothing (beyond a
 * message) if the file cannot be created.
 */
void SaveBMP(char *filename, bmpInfoHeader *InfoHeader, unsigned char *imgdata) {
    bmpFileHeader header;
    FILE *f;
    uint16_t type;
    /* "wb": binary mode. The original "w+" translates bytes on Windows. */
    f = fopen(filename, "wb");
    if (!f) { /* BUG FIX: the original wrote through a NULL FILE* on failure */
        printf("NO se puede abrir el fichero %s\n", filename);
        return;
    }
    /* +2 accounts for the magic bytes, which are not part of bmpFileHeader */
    header.size = InfoHeader->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) +2;
    header.resv1 = 0;
    header.resv2 = 0;
    /* offset = both headers + the 2 magic bytes */
    header.offset = sizeof(bmpFileHeader)+sizeof(bmpInfoHeader) +2;
    /* file identification: "BM" */
    type = 0x4D42;
    fwrite(&type, sizeof(type), 1, f);
    fwrite(&header, sizeof(bmpFileHeader), 1, f);
    fwrite(InfoHeader, sizeof(bmpInfoHeader), 1, f);
    fwrite(imgdata, InfoHeader->imgsize, 1, f);
    fclose(f);
}
/*
 * Prints the fields of a BMP info header to stdout.
 * Side effect: zero bpmx/bpmy values are replaced with a 24-ppi default,
 * but only AFTER they have been printed, so the printed value can be 0.
 * NOTE(review): if the intent was to display the defaults, the two `if`
 * blocks should run before the corresponding printf calls — confirm.
 * NOTE(review): several uint32_t fields are printed with %d; %u would be
 * the matching conversion.
 */
void DisplayInfo(char *FileName, bmpInfoHeader *InfoHeader)
{
printf("\n");
printf("Informacion de %s\n", FileName);
printf("Tamao de la cabecera: %u bytes\n", InfoHeader->headersize);
printf("Anchura: %d pixels\n", InfoHeader->width);
printf("Altura: %d pixels\n", InfoHeader->height);
printf("Planos (1): %d\n", InfoHeader->planes);
printf("Bits por pixel: %d\n", InfoHeader->bpp);
printf("Compresion: %d\n", InfoHeader->compress);
printf("Tamao de la imagen: %u bytes\n", InfoHeader->imgsize);
printf("Resolucion horizontal: %u px/m\n", InfoHeader->bpmx);
printf("Resolucion vertical: %u px/m\n", InfoHeader->bpmy);
/* apply a 24-ppi default when the resolution is unset (persists for SaveBMP) */
if (InfoHeader->bpmx == 0)
InfoHeader->bpmx = (unsigned) ((double)24*100/2.54);
if (InfoHeader->bpmy == 0)
InfoHeader->bpmy = (unsigned) ((double)24*100/2.54);
printf("Colores en paleta: %d\n", InfoHeader->colors);
printf("Colores importantes: %d\n", InfoHeader->imxtcolors);
}
/*
------------------------------------------------
Nuestro Cdigo
------------------------------------------------
*/
/*
 * Converts a 24-bpp BMP image to grayscale in place.
 * N = padded row width in BYTES, M = number of rows, A = row-major pixels.
 * One thread handles one pixel (3 consecutive bytes).
 * BUG FIX: the original computed col = blockIdx.x*blockDim.x + 3*threadIdx.x,
 * which made neighbouring blocks overlap (inter-block data races) and
 * produced pixel triples not aligned to 3-byte boundaries.
 */
__global__ void KernelByN (int N, int M, unsigned char *A) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = 3 * (blockIdx.x * blockDim.x + threadIdx.x); /* first byte of this pixel */
    if (row < M && col + 2 < N) {
        int base = row * N + col;
        unsigned char gray = (A[base] + A[base+1] + A[base+2]) / 3;
        A[base] = A[base+1] = A[base+2] = gray;
    }
}
/*
 * Usage: ./exe img.bmp prefix
 * Loads a 24-bpp BMP, converts it to grayscale on the GPU (HIP) and saves
 * it as "<prefix>_<img.bmp>". Prints kernel and total timings.
 */
int main(int argc, char** argv)
{
    unsigned int N, M;                 /* N: padded row width in bytes, M: rows */
    unsigned int numBytes;
    unsigned int nBlocksX, nBlocksY, nThreads;
    float TiempoTotal, TiempoKernel;
    hipEvent_t E0, E1, E2, E3;
    unsigned char *d_A;
    if (argc != 3) { printf("Usage: ./exe img.bmp prefix\n"); exit(0); }
    printf("INICIO\n");
    bmpInfoHeader header;
    unsigned char *image = LoadBMP(argv[1], &header);
    if (image == NULL) exit(1); /* BUG FIX: result was never checked */
    unsigned int N3 = header.width * 3;
    N = (N3+3) & 0xFFFFFFFC; /* BMP rows padded to a multiple of 4 bytes */
    M = header.height;
    /* threads per block in each dimension */
    nThreads = SIZE;
    /* BUG FIX: the original used ceil(N/SIZE) blocks in BOTH grid dimensions,
       leaving rows unprocessed whenever M > N. Size each axis independently. */
    nBlocksX = (N + nThreads - 1) / nThreads;
    nBlocksY = (M + nThreads - 1) / nThreads;
    numBytes = N * M * sizeof(unsigned char);
    dim3 dimGrid(nBlocksX, nBlocksY, 1);
    dim3 dimBlock(nThreads, nThreads, 1);
    hipEventCreate(&E0);
    hipEventCreate(&E1);
    hipEventCreate(&E2);
    hipEventCreate(&E3);
    hipEventRecord(E0, 0);
    hipEventSynchronize(E0);
    /* Allocate device memory and copy the image host -> device */
    hipMalloc((void**)&d_A, numBytes);
    hipMemcpy(d_A, image, numBytes, hipMemcpyHostToDevice);
    hipEventRecord(E1, 0);
    hipEventSynchronize(E1);
    /* Launch the grayscale kernel */
    hipLaunchKernelGGL(( KernelByN), dim3(dimGrid), dim3(dimBlock), 0, 0, N, M, d_A);
    hipEventRecord(E2, 0);
    hipEventSynchronize(E2);
    /* Copy the result back device -> host */
    hipMemcpy(image, d_A, numBytes, hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipEventRecord(E3, 0);
    hipEventSynchronize(E3);
    hipEventElapsedTime(&TiempoTotal, E0, E3);
    hipEventElapsedTime(&TiempoKernel, E1, E2);
    printf("\nKERNEL ByN\n");
    printf("Dimensiones: %dx%d\n", N, M);
    printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads*nThreads);
    printf("nBlocks: %dx%d (%d)\n", nBlocksX, nBlocksY, nBlocksX*nBlocksY);
    printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
    printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
    hipEventDestroy(E0); hipEventDestroy(E1); hipEventDestroy(E2); hipEventDestroy(E3);
    /* BUG FIX: the fixed 32-byte buffer overflowed for long file names;
       snprintf bounds the write. */
    char nom[512];
    snprintf(nom, sizeof(nom), "%s_%s", argv[2], argv[1]);
    SaveBMP(nom, &header, image);
    free(image); /* BUG FIX: the image buffer was never freed */
    return 0;
}
| b3d2a071850c4bd832a1c5c5923aa24f8bddbf4a.cu |
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#define SIZE 32
/* Autores:
*
* Antonio J. Cabrera
* Paul Gazel-Anthoine
*/
// Structs (H)
/* On-disk BMP file header WITHOUT the leading 2-byte "BM" magic; LoadBMP
   reads the magic separately so this struct starts on a 4-byte-aligned
   field and contains no padding (sizeof == 12). */
typedef struct bmpFileHeader {
/* the 2 identification bytes are deliberately not part of this struct */
uint32_t size; /* total file size in bytes */
uint16_t resv1; /* reserved */
uint16_t resv2; /* reserved */
uint32_t offset; /* offset from the start of the file to the pixel data */
} bmpFileHeader;
/* BMP info header (BITMAPINFOHEADER-style layout). */
typedef struct bmpInfoHeader {
uint32_t headersize; /* size of this header in bytes */
uint32_t width; /* width in pixels */
uint32_t height; /* height in pixels */
uint16_t planes; /* color planes (always 1) */
uint16_t bpp; /* bits per pixel */
uint32_t compress; /* compression type */
uint32_t imgsize; /* size of the pixel data in bytes */
uint32_t bpmx; /* horizontal resolution in pixels per metre */
uint32_t bpmy; /* vertical resolution in pixels per metre */
uint32_t colors; /* colors used in the palette */
uint32_t imxtcolors; /* important colors; 0 = all */
} bmpInfoHeader;
// Rutinas BMP (C)
/*
 * Loads a BMP image from disk.
 * Validates the "BM" magic (read separately from the headers so struct
 * alignment cannot corrupt the layout), fills *bInfoHeader, and returns a
 * malloc'd buffer with the raw pixel data. The caller owns the buffer.
 * Returns NULL on any I/O or format error (a message is printed).
 */
unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader) {
    FILE *f;
    bmpFileHeader header;   /* file header (without the 2 magic bytes) */
    unsigned char *imgdata; /* pixel data */
    uint16_t type;          /* 2 identification bytes ("BM") */
    /* "rb": binary mode. The original "r" corrupts the data on platforms
       that translate line endings (e.g. Windows). */
    f = fopen(filename, "rb");
    if (!f) {
        printf("NO se puede abrir el fichero %s\n", filename);
        return NULL;
    }
    /* Read and validate the 2-byte magic, checking the read succeeded */
    if (fread(&type, sizeof(uint16_t), 1, f) != 1 || type != 0x4D42) {
        fclose(f);
        printf("%s NO es una imagen BMP\n", filename);
        return NULL;
    }
    /* Read file header and BMP info header, checking for truncation */
    if (fread(&header, sizeof(bmpFileHeader), 1, f) != 1 ||
        fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f) != 1) {
        fclose(f);
        printf("%s NO es una imagen BMP\n", filename);
        return NULL;
    }
    printf("File size: %u\n", header.size);
    printf("Reservado: %u\n", header.resv1);
    printf("Reservado: %u\n", header.resv2);
    printf("Offset: %u\n", header.offset);
    /* Some writers leave imgsize at 0: recompute it (rows padded to 4 bytes) */
    if (bInfoHeader->imgsize == 0)
        bInfoHeader->imgsize = ((bInfoHeader->width*3 +3) / 4) * 4 * bInfoHeader->height;
    imgdata = (unsigned char*) malloc(bInfoHeader->imgsize);
    if (imgdata == NULL) {
        printf("Fallo en el malloc, del fichero %s\n", filename);
        fclose(f);
        exit(1); /* was exit(0): a failure must not report success */
    }
    /* Seek to the pixel data; the offset counts from the start of the file */
    fseek(f, header.offset, SEEK_SET);
    /* Read the pixel data, checking for truncation */
    if (fread(imgdata, bInfoHeader->imgsize, 1, f) != 1) {
        free(imgdata);
        fclose(f);
        printf("%s NO es una imagen BMP\n", filename);
        return NULL;
    }
    fclose(f);
    return imgdata;
}
/*
 * Allocates and initializes a 24-bpp, uncompressed BMP info header.
 * ppp: resolution in pixels per inch (converted to pixels per metre).
 * Returns NULL if the allocation fails; the caller owns the returned memory.
 */
bmpInfoHeader *createInfoHeader(uint32_t width, uint32_t height, uint32_t ppp) {
    /* BUG FIX: the original stored malloc's result in a bool and then
       dereferenced the uninitialized InfoHeader pointer. */
    bmpInfoHeader *InfoHeader = (bmpInfoHeader*) malloc(sizeof(bmpInfoHeader));
    if (InfoHeader == NULL) return NULL;
    InfoHeader->headersize = sizeof(bmpInfoHeader);
    InfoHeader->width = width;
    InfoHeader->height = height;
    InfoHeader->planes = 1;
    InfoHeader->bpp = 24;
    InfoHeader->compress = 0;
    /* 3 bytes per pixel; every row is padded up to a multiple of 4 bytes */
    InfoHeader->imgsize = ((width*3 + 3) / 4) * 4 * height;
    /* inches -> metres: multiply by 100/2.54 */
    InfoHeader->bpmx = (unsigned) ((double)ppp*100/2.54);
    InfoHeader->bpmy = InfoHeader->bpmx; /* same vertical and horizontal resolution */
    InfoHeader->colors = 0;
    InfoHeader->imxtcolors = 0;
    return InfoHeader;
}
/*
 * Writes a BMP file: "BM" magic, file header, info header, pixel data.
 * imgdata must contain InfoHeader->imgsize bytes. Does nothing (beyond a
 * message) if the file cannot be created.
 */
void SaveBMP(char *filename, bmpInfoHeader *InfoHeader, unsigned char *imgdata) {
    bmpFileHeader header;
    FILE *f;
    uint16_t type;
    /* "wb": binary mode. The original "w+" translates bytes on Windows. */
    f = fopen(filename, "wb");
    if (!f) { /* BUG FIX: the original wrote through a NULL FILE* on failure */
        printf("NO se puede abrir el fichero %s\n", filename);
        return;
    }
    /* +2 accounts for the magic bytes, which are not part of bmpFileHeader */
    header.size = InfoHeader->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) +2;
    header.resv1 = 0;
    header.resv2 = 0;
    /* offset = both headers + the 2 magic bytes */
    header.offset = sizeof(bmpFileHeader)+sizeof(bmpInfoHeader) +2;
    /* file identification: "BM" */
    type = 0x4D42;
    fwrite(&type, sizeof(type), 1, f);
    fwrite(&header, sizeof(bmpFileHeader), 1, f);
    fwrite(InfoHeader, sizeof(bmpInfoHeader), 1, f);
    fwrite(imgdata, InfoHeader->imgsize, 1, f);
    fclose(f);
}
/*
 * Prints the fields of a BMP info header to stdout.
 * Side effect: zero bpmx/bpmy values are replaced with a 24-ppi default,
 * but only AFTER they have been printed, so the printed value can be 0.
 * NOTE(review): if the intent was to display the defaults, the two `if`
 * blocks should run before the corresponding printf calls — confirm.
 * NOTE(review): several uint32_t fields are printed with %d; %u would be
 * the matching conversion.
 */
void DisplayInfo(char *FileName, bmpInfoHeader *InfoHeader)
{
printf("\n");
printf("Informacion de %s\n", FileName);
printf("Tamaño de la cabecera: %u bytes\n", InfoHeader->headersize);
printf("Anchura: %d pixels\n", InfoHeader->width);
printf("Altura: %d pixels\n", InfoHeader->height);
printf("Planos (1): %d\n", InfoHeader->planes);
printf("Bits por pixel: %d\n", InfoHeader->bpp);
printf("Compresion: %d\n", InfoHeader->compress);
printf("Tamaño de la imagen: %u bytes\n", InfoHeader->imgsize);
printf("Resolucion horizontal: %u px/m\n", InfoHeader->bpmx);
printf("Resolucion vertical: %u px/m\n", InfoHeader->bpmy);
/* apply a 24-ppi default when the resolution is unset (persists for SaveBMP) */
if (InfoHeader->bpmx == 0)
InfoHeader->bpmx = (unsigned) ((double)24*100/2.54);
if (InfoHeader->bpmy == 0)
InfoHeader->bpmy = (unsigned) ((double)24*100/2.54);
printf("Colores en paleta: %d\n", InfoHeader->colors);
printf("Colores importantes: %d\n", InfoHeader->imxtcolors);
}
/*
------------------------------------------------
Nuestro Código
------------------------------------------------
*/
/*
 * Converts a 24-bpp BMP image to grayscale in place.
 * N = padded row width in BYTES, M = number of rows, A = row-major pixels.
 * One thread handles one pixel (3 consecutive bytes).
 * BUG FIX: the original computed col = blockIdx.x*blockDim.x + 3*threadIdx.x,
 * which made neighbouring blocks overlap (inter-block data races) and
 * produced pixel triples not aligned to 3-byte boundaries.
 */
__global__ void KernelByN (int N, int M, unsigned char *A) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = 3 * (blockIdx.x * blockDim.x + threadIdx.x); /* first byte of this pixel */
    if (row < M && col + 2 < N) {
        int base = row * N + col;
        unsigned char gray = (A[base] + A[base+1] + A[base+2]) / 3;
        A[base] = A[base+1] = A[base+2] = gray;
    }
}
/*
 * Usage: ./exe img.bmp prefix
 * Loads a 24-bpp BMP, converts it to grayscale on the GPU (CUDA) and saves
 * it as "<prefix>_<img.bmp>". Prints kernel and total timings.
 */
int main(int argc, char** argv)
{
    unsigned int N, M;                 /* N: padded row width in bytes, M: rows */
    unsigned int numBytes;
    unsigned int nBlocksX, nBlocksY, nThreads;
    float TiempoTotal, TiempoKernel;
    cudaEvent_t E0, E1, E2, E3;
    unsigned char *d_A;
    if (argc != 3) { printf("Usage: ./exe img.bmp prefix\n"); exit(0); }
    printf("INICIO\n");
    bmpInfoHeader header;
    unsigned char *image = LoadBMP(argv[1], &header);
    if (image == NULL) exit(1); /* BUG FIX: result was never checked */
    unsigned int N3 = header.width * 3;
    N = (N3+3) & 0xFFFFFFFC; /* BMP rows padded to a multiple of 4 bytes */
    M = header.height;
    /* threads per block in each dimension */
    nThreads = SIZE;
    /* BUG FIX: the original used ceil(N/SIZE) blocks in BOTH grid dimensions,
       leaving rows unprocessed whenever M > N. Size each axis independently. */
    nBlocksX = (N + nThreads - 1) / nThreads;
    nBlocksY = (M + nThreads - 1) / nThreads;
    numBytes = N * M * sizeof(unsigned char);
    dim3 dimGrid(nBlocksX, nBlocksY, 1);
    dim3 dimBlock(nThreads, nThreads, 1);
    cudaEventCreate(&E0);
    cudaEventCreate(&E1);
    cudaEventCreate(&E2);
    cudaEventCreate(&E3);
    cudaEventRecord(E0, 0);
    cudaEventSynchronize(E0);
    /* Allocate device memory and copy the image host -> device */
    cudaMalloc((void**)&d_A, numBytes);
    cudaMemcpy(d_A, image, numBytes, cudaMemcpyHostToDevice);
    cudaEventRecord(E1, 0);
    cudaEventSynchronize(E1);
    /* Launch the grayscale kernel */
    KernelByN<<<dimGrid, dimBlock>>>(N, M, d_A);
    cudaEventRecord(E2, 0);
    cudaEventSynchronize(E2);
    /* Copy the result back device -> host */
    cudaMemcpy(image, d_A, numBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaEventRecord(E3, 0);
    cudaEventSynchronize(E3);
    cudaEventElapsedTime(&TiempoTotal, E0, E3);
    cudaEventElapsedTime(&TiempoKernel, E1, E2);
    printf("\nKERNEL ByN\n");
    printf("Dimensiones: %dx%d\n", N, M);
    printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads*nThreads);
    printf("nBlocks: %dx%d (%d)\n", nBlocksX, nBlocksY, nBlocksX*nBlocksY);
    printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
    printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
    cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3);
    /* BUG FIX: the fixed 32-byte buffer overflowed for long file names;
       snprintf bounds the write. */
    char nom[512];
    snprintf(nom, sizeof(nom), "%s_%s", argv[2], argv[1]);
    SaveBMP(nom, &header, image);
    free(image); /* BUG FIX: the image buffer was never freed */
    return 0;
}
|
339b45e8fc27823a583789c320b26d29bd426271.hip | // !!! This is a file automatically generated by hipify!!!
#include <omp.h>
#include <iostream>
using namespace std;
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
// Functor performing a gather: operator()(i) returns data[index[i]].
// Both members are raw device pointers (obtained in main via
// thrust::raw_pointer_cast), so the functor is usable inside thrust::transform.
struct gather_functor {
const int* index; // index (permutation) array
const int* data;  // source array
gather_functor(int* _data, int* _index) : data(_data), index(_index) {};
__host__ __device__
int operator()(int i) {
return data[index[i]]; // gather: out[i] = data[index[i]]
}
};
/*
 * Benchmarks a GPU gather (b[i] = a[index[i]]) under three index patterns:
 * op 0 = sequential (coalesced), 1 = sorted random, other = random.
 * Usage: prog <size in K elements> <nLoops> <pattern>
 */
int main(int argc, char *argv[])
{
    /* BUG FIX: argv[3] is read below, so four arguments are required
       (the original only checked argc < 3 and could read past argv). */
    if (argc < 4) {
        cerr << "Use: size (k) nLoops sequential" << endl;
        return(1);
    }
    int n = atof(argv[1])*1e3;
    int nLoops = atof(argv[2]);
    int op = atoi(argv[3]);
    cout << "Using " << (n/1.e6) << "M elements and averaging over "
         << nLoops << " tests" << endl;
    thrust::device_vector<int> d_a(n), d_b(n), d_index(n);
    thrust::sequence(d_a.begin(), d_a.end());
    thrust::fill(d_b.begin(), d_b.end(), -1); /* sentinel for verification */
    thrust::host_vector<int> h_index(n);
    switch(op) {
    case 0:
        // Best case: sequential indices
        thrust::sequence(d_index.begin(), d_index.end());
        cout << "Sequential data " << endl;
        break;
    case 1:
        // Mid-performance case: sorted random indices.
        // BUG FIX: rand()%n covers the full index range; %(n-1) could
        // never produce n-1.
        for (int i = 0; i < n; i++) h_index[i] = rand() % n;
        d_index = h_index; // transfer to device
        thrust::sort(d_index.begin(), d_index.end());
        cout << "Sorted random data " << endl;
        break;
    default:
        // Worst case: unsorted random indices
        for (int i = 0; i < n; i++) h_index[i] = rand() % n;
        d_index = h_index; // transfer to device
        cout << "Random data " << endl;
        break;
    }
    double startTime = omp_get_wtime();
    for (int i = 0; i < nLoops; i++)
        thrust::transform(thrust::counting_iterator<unsigned int>(0),
                          thrust::counting_iterator<unsigned int>(n),
                          d_b.begin(),
                          gather_functor(thrust::raw_pointer_cast(&d_a[0]),
                                         thrust::raw_pointer_cast(&d_index[0])));
    hipDeviceSynchronize(); /* wait for all launches before reading the clock */
    double endTime = omp_get_wtime();
    // Verify the gather on the host
    thrust::host_vector<int> h_b = d_b;
    thrust::host_vector<int> h_a = d_a;
    h_index = d_index;
    for (int i = 0; i < n; i++) {
        if (h_b[i] != h_a[h_index[i]]) {
            cout << "Error!" << endl; return(1);
        }
    }
    cout << "Success!" << endl;
    cout << "Average time " << (endTime-startTime)/nLoops << endl;
    return 0;
}
| 339b45e8fc27823a583789c320b26d29bd426271.cu | #include <omp.h>
#include <iostream>
using namespace std;
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
// Functor performing a gather: operator()(i) returns data[index[i]].
// Both members are raw device pointers (obtained in main via
// thrust::raw_pointer_cast), so the functor is usable inside thrust::transform.
struct gather_functor {
const int* index; // index (permutation) array
const int* data;  // source array
gather_functor(int* _data, int* _index) : data(_data), index(_index) {};
__host__ __device__
int operator()(int i) {
return data[index[i]]; // gather: out[i] = data[index[i]]
}
};
/*
 * Benchmarks a GPU gather (b[i] = a[index[i]]) under three index patterns:
 * op 0 = sequential (coalesced), 1 = sorted random, other = random.
 * Usage: prog <size in K elements> <nLoops> <pattern>
 */
int main(int argc, char *argv[])
{
    /* BUG FIX: argv[3] is read below, so four arguments are required
       (the original only checked argc < 3 and could read past argv). */
    if (argc < 4) {
        cerr << "Use: size (k) nLoops sequential" << endl;
        return(1);
    }
    int n = atof(argv[1])*1e3;
    int nLoops = atof(argv[2]);
    int op = atoi(argv[3]);
    cout << "Using " << (n/1.e6) << "M elements and averaging over "
         << nLoops << " tests" << endl;
    thrust::device_vector<int> d_a(n), d_b(n), d_index(n);
    thrust::sequence(d_a.begin(), d_a.end());
    thrust::fill(d_b.begin(), d_b.end(), -1); /* sentinel for verification */
    thrust::host_vector<int> h_index(n);
    switch(op) {
    case 0:
        // Best case: sequential indices
        thrust::sequence(d_index.begin(), d_index.end());
        cout << "Sequential data " << endl;
        break;
    case 1:
        // Mid-performance case: sorted random indices.
        // BUG FIX: rand()%n covers the full index range; %(n-1) could
        // never produce n-1.
        for (int i = 0; i < n; i++) h_index[i] = rand() % n;
        d_index = h_index; // transfer to device
        thrust::sort(d_index.begin(), d_index.end());
        cout << "Sorted random data " << endl;
        break;
    default:
        // Worst case: unsorted random indices
        for (int i = 0; i < n; i++) h_index[i] = rand() % n;
        d_index = h_index; // transfer to device
        cout << "Random data " << endl;
        break;
    }
    double startTime = omp_get_wtime();
    for (int i = 0; i < nLoops; i++)
        thrust::transform(thrust::counting_iterator<unsigned int>(0),
                          thrust::counting_iterator<unsigned int>(n),
                          d_b.begin(),
                          gather_functor(thrust::raw_pointer_cast(&d_a[0]),
                                         thrust::raw_pointer_cast(&d_index[0])));
    cudaDeviceSynchronize(); /* wait for all launches before reading the clock */
    double endTime = omp_get_wtime();
    // Verify the gather on the host
    thrust::host_vector<int> h_b = d_b;
    thrust::host_vector<int> h_a = d_a;
    h_index = d_index;
    for (int i = 0; i < n; i++) {
        if (h_b[i] != h_a[h_index[i]]) {
            cout << "Error!" << endl; return(1);
        }
    }
    cout << "Success!" << endl;
    cout << "Average time " << (endTime-startTime)/nLoops << endl;
    return 0;
}
|
7472e4943a971d7955ac5c4943015a3a63b5a252.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlat2c.cu, mixed zc -> ds, Tue Aug 30 09:38:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
/*
    Converts the lower-triangular part (diagonal included) of the n-by-n
    double-precision matrix A into the single-precision matrix SA.
    Each BLK_X-thread block owns a BLK_X x BLK_Y tile; each thread walks one
    row across up to BLK_Y columns. Any entry outside [-rmax, rmax] raises
    the device-global `flag`, which the host wrapper copies back into *info.
*/
__global__
void dlat2s_lower(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
/* ind: global row index; iby: first column of this block's column panel */
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
/*
Similar to dlat2s_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
/*
    Converts the upper-triangular part (diagonal included) of the n-by-n
    double-precision matrix A into the single-precision matrix SA.
    Same launch geometry as dlat2s_lower; entries outside [-rmax, rmax]
    raise the device-global `flag`, read back by the host wrapper.
*/
__global__
void dlat2s_upper(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
/* ind: global row index; iby: first column of this block's column panel */
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
/* only entries on or above the diagonal */
if ( ind <= iby+j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAT2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAT2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the n-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lat2
*******************************************************************************/
/*
 * Host-side dispatcher for dlat2s: validates arguments, zeroes the
 * device-global overflow flag from *info, launches the lower/upper
 * conversion kernel on the queue's stream, then copies the flag back into
 * *info (1 = some entry overflowed single precision).
 * NOTE(review): only sizeof(flag)==sizeof(int) bytes are copied while
 * magma_int_t may be wider — relies on *info being 0 beforehand and on
 * little-endian layout; confirm against MAGMA upstream.
 */
extern "C" void
magmablas_dlat2s_q(
    magma_uplo_t uplo, magma_int_t n,
    magmaDouble_const_ptr A, magma_int_t lda,
    magmaFloat_ptr SA, magma_int_t ldsa,
    magma_queue_t queue,
    magma_int_t *info )
{
    *info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper )
        *info = -1;
    else if ( n < 0 )
        *info = -2;
    else if ( lda < max(1,n) )
        *info = -4;
    else if ( ldsa < max(1,n) )
        *info = -6;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //*info;
    }
    /* quick return */
    if ( n == 0 ) {
        return;
    }
    /* single-precision overflow threshold */
    double rmax = (double)lapackf77_slamch("O");
    dim3 threads( BLK_X, 1 );
    dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
    /* BUG FIX: HIP's hipMemcpyToSymbol/FromSymbol take the symbol address;
       the device variable must be wrapped in HIP_SYMBOL() — the bare name
       does not convert to const void* under HIP. */
    hipMemcpyToSymbol( HIP_SYMBOL(flag), info, sizeof(flag) ); // flag = 0
    if (uplo == MagmaLower) {
        hipLaunchKernelGGL(( dlat2s_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax);
    }
    else if (uplo == MagmaUpper) {
        hipLaunchKernelGGL(( dlat2s_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax);
    }
    hipMemcpyFromSymbol( info, HIP_SYMBOL(flag), sizeof(flag) ); // info = flag
}
| 7472e4943a971d7955ac5c4943015a3a63b5a252.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlat2c.cu, mixed zc -> ds, Tue Aug 30 09:38:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
/*
    Converts the lower-triangular part (diagonal included) of the n-by-n
    double-precision matrix A into the single-precision matrix SA.
    Each BLK_X-thread block owns a BLK_X x BLK_Y tile; each thread walks one
    row across up to BLK_Y columns. Any entry outside [-rmax, rmax] raises
    the device-global `flag`, which the host wrapper copies back into *info.
*/
__global__
void dlat2s_lower(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
/* ind: global row index; iby: first column of this block's column panel */
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
/*
Similar to dlat2s_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
/*
    Converts the upper-triangular part (diagonal included) of the n-by-n
    double-precision matrix A into the single-precision matrix SA.
    Same launch geometry as dlat2s_lower; entries outside [-rmax, rmax]
    raise the device-global `flag`, read back by the host wrapper.
*/
__global__
void dlat2s_upper(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
/* ind: global row index; iby: first column of this block's column panel */
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
/* only entries on or above the diagonal */
if ( ind <= iby+j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
/* entry would overflow in single precision */
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAT2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAT2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the n-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lat2
*******************************************************************************/
/*
 * Host-side dispatcher for dlat2s: validates arguments, zeroes the
 * device-global overflow flag from *info, launches the lower/upper
 * conversion kernel on the queue's stream, then copies the flag back into
 * *info (1 = some entry overflowed single precision).
 * NOTE(review): only sizeof(flag)==sizeof(int) bytes are copied while
 * magma_int_t may be wider — relies on *info being 0 beforehand and on
 * little-endian layout; confirm against MAGMA upstream.
 */
extern "C" void
magmablas_dlat2s_q(
magma_uplo_t uplo, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
/* argument checks: the negative code identifies the offending argument */
if ( uplo != MagmaLower && uplo != MagmaUpper )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,n) )
*info = -4;
else if ( ldsa < max(1,n) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( n == 0 ) {
return;
}
/* single-precision overflow threshold */
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
if (uplo == MagmaLower) {
dlat2s_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
}
else if (uplo == MagmaUpper) {
dlat2s_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
}
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
|
93aaad9e5109e0eb53c0920303594912320a1afd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/fil/fil.h>
#include <cuml/tree/algo_helper.h>
#include <treelite/c_api.h>
#include <treelite/tree.h>

#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/ensemble/randomforest.hpp>

#include <algorithm>
#include <utility>

#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace fil {
// Full configuration for one FIL benchmark case.
struct Params {
  DatasetParams data;               // rows / cols / nclasses of the synthetic dataset
  RegressionParams blobs;           // regression-data generator settings
  ModelHandle model;                // treelite model handle (populated at run time)
  ML::fil::storage_type_t storage;  // FIL forest storage layout
  ML::fil::algo_t algo;             // FIL inference algorithm
  RF_params rf;                     // random-forest training parameters
  int predict_repetitions;          // predict() calls per timed iteration
};
// Benchmark fixture: trains a small random forest on generated data, converts
// it to a FIL forest via treelite, and times only the FIL predict() calls.
class FIL : public RegressionFixture<float> {
  typedef RegressionFixture<float> Base;

 public:
  FIL(const std::string& name, const Params& p)
  /*
    fitting to linear combinations in "y" normally yields trees that check
    values of all significant columns, as well as their linear
    combinations in "X". During inference, the exact threshold
    values do not affect speed. The distribution of column popularity does
    not affect speed barring lots of uninformative columns in succession.
    Hence, this method represents real datasets well enough for both
    classification and regression.
  */
    : RegressionFixture<float>(name, p.data, p.blobs),
      model(p.model),
      p_rest(p) {}

  // Maps regression targets onto integer class labels in [0, nclasses-1] so
  // the same generated data can also drive a classification benchmark.
  static void regression_to_classification(float* y, int nrows, int nclasses,
                                           hipStream_t stream) {
    raft::linalg::unaryOp(
      y, y, nrows,
      [=] __device__(float a) {
        return float(lroundf(fabsf(a) * 1000. * nclasses) % nclasses);
      },
      stream);
  }

 protected:
  void runBenchmark(::benchmark::State& state) override {
    if (!params.rowMajor) {
      state.SkipWithError("FIL only supports row-major inputs");
    }
    if (params.nclasses > 1) {
      // convert regression ranges into [0..nclasses-1]
      regression_to_classification(data.y, params.nrows, params.nclasses,
                                   stream);
    }
    // create model
    ML::RandomForestRegressorF rf_model;
    auto* mPtr = &rf_model;
    mPtr->trees = nullptr;
    // Cap training at 1000 rows — training is setup, not what we measure.
    // Fixed: hipify had rewritten std::min as the non-standard global ::min;
    // the CUDA original of this file uses std::min.
    size_t train_nrows = std::min(params.nrows, 1000);
    fit(*handle, mPtr, data.X, train_nrows, params.ncols, data.y, p_rest.rf);
    CUDA_CHECK(hipStreamSynchronize(stream));

    ML::build_treelite_forest(&model, &rf_model, params.ncols,
                              params.nclasses > 1 ? 2 : 1);
    ML::fil::treelite_params_t tl_params = {
      .algo = p_rest.algo,
      .output_class = params.nclasses > 1,  // cuML RF forest
      .threshold = 1.f / params.nclasses,   //Fixture::DatasetParams
      .storage_type = p_rest.storage};
    ML::fil::from_treelite(*handle, &forest, model, &tl_params);

    // only time prediction
    this->loopOnState(state, [this]() {
      // Dataset<D, L> allocates y assuming one output value per input row,
      // so not supporting predict_proba yet
      for (int i = 0; i < p_rest.predict_repetitions; i++) {
        ML::fil::predict(*this->handle, this->forest, this->data.y,
                         this->data.X, this->params.nrows, false);
      }
    });
  }

  void allocateBuffers(const ::benchmark::State& state) override {
    Base::allocateBuffers(state);
  }

  void deallocateBuffers(const ::benchmark::State& state) override {
    // Release the FIL forest built in runBenchmark before the base teardown.
    ML::fil::free(*handle, forest);
    Base::deallocateBuffers(state);
  }

 private:
  ML::fil::forest_t forest;  // FIL forest built from the trained model
  ModelHandle model;         // treelite handle for the trained forest
  Params p_rest;             // remaining benchmark configuration
};
// Per-case sweep values; copied into a Params instance by getInputs().
struct FilBenchParams {
  int nrows;                        // dataset rows
  int ncols;                        // dataset columns
  int nclasses;                     // 1 = regression, >1 = classification
  int max_depth;                    // tree depth limit
  int ntrees;                       // forest size
  ML::fil::storage_type_t storage;  // FIL storage layout
  ML::fil::algo_t algo;             // FIL inference algorithm
};
// Builds the benchmark configuration list: a shared Params template is filled
// in, then specialized once per FilBenchParams sweep entry.
std::vector<Params> getInputs() {
  std::vector<Params> out;
  Params p;
  p.data.rowMajor = true;

  p.blobs = {
    .n_informative = -1,   // Just a placeholder value, anyway changed below
    .effective_rank = -1,  // Just a placeholder value, anyway changed below
    .bias = 0.f,
    .tail_strength = 0.1,
    .noise = 0.01,
    .shuffle = false,
    .seed = 12345ULL};

  set_rf_params(p.rf,     // Output RF parameters
                1,        // n_trees, just a placeholder value,
                          // anyway changed below
                true,     // bootstrap
                1.f,      // max_samples
                1234ULL,  // seed
                8);       // n_streams

  set_tree_params(p.rf.tree_params,    // Output tree parameters
                  10,                  // max_depth, just a placeholder value,
                                       // anyway changed below
                  (1 << 20),           // max_leaves
                  1,                   // max_features
                  32,                  // n_bins
                  1,                   // split_algo
                  3,                   // min_samples_leaf
                  3,                   // min_samples_split
                  0.0f,                // min_impurity_decrease
                  true,                // bootstrap_features
                  ML::CRITERION::MSE,  // split_criterion
                  false,               // quantile_per_tree
                  false,               // use_experimental_backend
                  128);                // max_batch_size

  using ML::fil::algo_t;
  using ML::fil::storage_type_t;
  // Two cases: regression (nclasses == 1) and binary classification.
  std::vector<FilBenchParams> var_params = {
    {(int)1e6, 20, 1, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG},
    {(int)1e6, 20, 2, 5, 1000, storage_type_t::DENSE,
     algo_t::BATCH_TREE_REORG}};
  for (auto& i : var_params) {
    p.data.nrows = i.nrows;
    p.data.ncols = i.ncols;
    // One third of the columns carry signal (see the fixture's realism note).
    p.blobs.n_informative = i.ncols / 3;
    p.blobs.effective_rank = i.ncols / 3;
    p.data.nclasses = i.nclasses;
    p.rf.tree_params.max_depth = i.max_depth;
    p.rf.n_trees = i.ntrees;
    p.storage = i.storage;
    p.algo = i.algo;
    p.predict_repetitions = 10;
    out.push_back(p);
  }
  return out;
}
ML_BENCH_REGISTER(Params, FIL, "", getInputs());
} // end namespace fil
} // end namespace Bench
} // end namespace ML
| 93aaad9e5109e0eb53c0920303594912320a1afd.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/fil/fil.h>
#include <cuml/tree/algo_helper.h>
#include <treelite/c_api.h>
#include <treelite/tree.h>
#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/ensemble/randomforest.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace fil {
// Full configuration for one FIL benchmark case.
struct Params {
  DatasetParams data;               // rows / cols / nclasses of the synthetic dataset
  RegressionParams blobs;           // regression-data generator settings
  ModelHandle model;                // treelite model handle (populated at run time)
  ML::fil::storage_type_t storage;  // FIL forest storage layout
  ML::fil::algo_t algo;             // FIL inference algorithm
  RF_params rf;                     // random-forest training parameters
  int predict_repetitions;          // predict() calls per timed iteration
};
// Benchmark fixture: trains a small random forest on generated data, converts
// it to a FIL forest via treelite, and times only the FIL predict() calls.
class FIL : public RegressionFixture<float> {
  typedef RegressionFixture<float> Base;

 public:
  FIL(const std::string& name, const Params& p)
  /*
    fitting to linear combinations in "y" normally yields trees that check
    values of all significant columns, as well as their linear
    combinations in "X". During inference, the exact threshold
    values do not affect speed. The distribution of column popularity does
    not affect speed barring lots of uninformative columns in succession.
    Hence, this method represents real datasets well enough for both
    classification and regression.
  */
    : RegressionFixture<float>(name, p.data, p.blobs),
      model(p.model),
      p_rest(p) {}

  // Maps regression targets onto integer class labels in [0, nclasses-1] so
  // the same generated data can also drive a classification benchmark.
  static void regression_to_classification(float* y, int nrows, int nclasses,
                                           cudaStream_t stream) {
    raft::linalg::unaryOp(
      y, y, nrows,
      [=] __device__(float a) {
        return float(lroundf(fabsf(a) * 1000. * nclasses) % nclasses);
      },
      stream);
  }

 protected:
  void runBenchmark(::benchmark::State& state) override {
    if (!params.rowMajor) {
      state.SkipWithError("FIL only supports row-major inputs");
    }
    if (params.nclasses > 1) {
      // convert regression ranges into [0..nclasses-1]
      regression_to_classification(data.y, params.nrows, params.nclasses,
                                   stream);
    }
    // create model
    ML::RandomForestRegressorF rf_model;
    auto* mPtr = &rf_model;
    mPtr->trees = nullptr;
    // Cap training at 1000 rows — training is setup, not what we measure.
    size_t train_nrows = std::min(params.nrows, 1000);
    fit(*handle, mPtr, data.X, train_nrows, params.ncols, data.y, p_rest.rf);
    CUDA_CHECK(cudaStreamSynchronize(stream));

    ML::build_treelite_forest(&model, &rf_model, params.ncols,
                              params.nclasses > 1 ? 2 : 1);
    ML::fil::treelite_params_t tl_params = {
      .algo = p_rest.algo,
      .output_class = params.nclasses > 1,  // cuML RF forest
      .threshold = 1.f / params.nclasses,   //Fixture::DatasetParams
      .storage_type = p_rest.storage};
    ML::fil::from_treelite(*handle, &forest, model, &tl_params);

    // only time prediction
    this->loopOnState(state, [this]() {
      // Dataset<D, L> allocates y assuming one output value per input row,
      // so not supporting predict_proba yet
      for (int i = 0; i < p_rest.predict_repetitions; i++) {
        ML::fil::predict(*this->handle, this->forest, this->data.y,
                         this->data.X, this->params.nrows, false);
      }
    });
  }

  void allocateBuffers(const ::benchmark::State& state) override {
    Base::allocateBuffers(state);
  }

  void deallocateBuffers(const ::benchmark::State& state) override {
    // Release the FIL forest built in runBenchmark before the base teardown.
    ML::fil::free(*handle, forest);
    Base::deallocateBuffers(state);
  }

 private:
  ML::fil::forest_t forest;  // FIL forest built from the trained model
  ModelHandle model;         // treelite handle for the trained forest
  Params p_rest;             // remaining benchmark configuration
};
// Per-case sweep values; copied into a Params instance by getInputs().
struct FilBenchParams {
  int nrows;                        // dataset rows
  int ncols;                        // dataset columns
  int nclasses;                     // 1 = regression, >1 = classification
  int max_depth;                    // tree depth limit
  int ntrees;                       // forest size
  ML::fil::storage_type_t storage;  // FIL storage layout
  ML::fil::algo_t algo;             // FIL inference algorithm
};
// Builds the benchmark configuration list: a shared Params template is filled
// in, then specialized once per FilBenchParams sweep entry.
std::vector<Params> getInputs() {
  std::vector<Params> out;
  Params p;
  p.data.rowMajor = true;

  p.blobs = {
    .n_informative = -1,   // Just a placeholder value, anyway changed below
    .effective_rank = -1,  // Just a placeholder value, anyway changed below
    .bias = 0.f,
    .tail_strength = 0.1,
    .noise = 0.01,
    .shuffle = false,
    .seed = 12345ULL};

  set_rf_params(p.rf,     // Output RF parameters
                1,        // n_trees, just a placeholder value,
                          // anyway changed below
                true,     // bootstrap
                1.f,      // max_samples
                1234ULL,  // seed
                8);       // n_streams

  set_tree_params(p.rf.tree_params,    // Output tree parameters
                  10,                  // max_depth, just a placeholder value,
                                       // anyway changed below
                  (1 << 20),           // max_leaves
                  1,                   // max_features
                  32,                  // n_bins
                  1,                   // split_algo
                  3,                   // min_samples_leaf
                  3,                   // min_samples_split
                  0.0f,                // min_impurity_decrease
                  true,                // bootstrap_features
                  ML::CRITERION::MSE,  // split_criterion
                  false,               // quantile_per_tree
                  false,               // use_experimental_backend
                  128);                // max_batch_size

  using ML::fil::algo_t;
  using ML::fil::storage_type_t;
  // Two cases: regression (nclasses == 1) and binary classification.
  std::vector<FilBenchParams> var_params = {
    {(int)1e6, 20, 1, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG},
    {(int)1e6, 20, 2, 5, 1000, storage_type_t::DENSE,
     algo_t::BATCH_TREE_REORG}};
  for (auto& i : var_params) {
    p.data.nrows = i.nrows;
    p.data.ncols = i.ncols;
    // One third of the columns carry signal (see the fixture's realism note).
    p.blobs.n_informative = i.ncols / 3;
    p.blobs.effective_rank = i.ncols / 3;
    p.data.nclasses = i.nclasses;
    p.rf.tree_params.max_depth = i.max_depth;
    p.rf.n_trees = i.ntrees;
    p.storage = i.storage;
    p.algo = i.algo;
    p.predict_repetitions = 10;
    out.push_back(p);
  }
  return out;
}
ML_BENCH_REGISTER(Params, FIL, "", getInputs());
} // end namespace fil
} // end namespace Bench
} // end namespace ML
|
02e0d79d8cf1921b466b6b078e7cf7363a2c5683.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "correlate/oskar_evaluate_auto_power_cuda.h"
#include "correlate/private_correlate_functions_inline.h"
/* Kernels. ================================================================ */
/* Single precision. */
/* Computes the per-source auto-power product J * J^H of 2x2 complex Jones
 * matrices (single precision). One thread handles one source; threads past
 * num_sources exit early. */
__global__
void oskar_evaluate_auto_power_cudak_f(const int num_sources,
    const float4c* restrict jones, float4c* restrict out)
{
    float4c val1, val2;
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_sources) return;

    /* Calculate auto-power product at the source. */
    OSKAR_LOAD_MATRIX(val1, jones, i);
    val2 = val1;
    oskar_multiply_complex_matrix_conjugate_transpose_in_place_f(&val1, &val2);

    /* Store result. */
    out[i] = val1;
}
/* Scalar auto-power per source (single precision):
 * out[i] = jones[i] * conj(jones[i]). One thread per source. */
__global__
void oskar_evaluate_auto_power_scalar_cudak_f(const int num_sources,
    const float2* restrict jones, float2* restrict out)
{
    const int s = blockDim.x * blockIdx.x + threadIdx.x;
    if (s < num_sources)
    {
        /* Multiply the source term by its own conjugate, in place. */
        float2 a = jones[s];
        float2 b = a;
        oskar_multiply_complex_conjugate_in_place_f(&a, &b);
        out[s] = a;
    }
}
/* Double precision. */
/* Computes the per-source auto-power product J * J^H of 2x2 complex Jones
 * matrices (double precision). One thread handles one source; threads past
 * num_sources exit early. */
__global__
void oskar_evaluate_auto_power_cudak_d(const int num_sources,
    const double4c* restrict jones, double4c* restrict out)
{
    double4c val1, val2;
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_sources) return;

    /* Calculate auto-power product at the source. */
    OSKAR_LOAD_MATRIX(val1, jones, i);
    val2 = val1;
    oskar_multiply_complex_matrix_conjugate_transpose_in_place_d(&val1, &val2);

    /* Store result. */
    out[i] = val1;
}
/* Scalar auto-power per source (double precision):
 * out[i] = jones[i] * conj(jones[i]). One thread per source. */
__global__
void oskar_evaluate_auto_power_scalar_cudak_d(const int num_sources,
    const double2* restrict jones, double2* restrict out)
{
    const int s = blockDim.x * blockIdx.x + threadIdx.x;
    if (s < num_sources)
    {
        /* Multiply the source term by its own conjugate, in place. */
        double2 a = jones[s];
        double2 b = a;
        oskar_multiply_complex_conjugate_in_place_d(&a, &b);
        out[s] = a;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
/* Launches the single-precision matrix auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_cuda_f(int num_sources,
        const float4c* d_jones, float4c* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_cudak_f
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
/* Launches the single-precision scalar auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_scalar_cuda_f(int num_sources,
        const float2* d_jones, float2* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_scalar_cudak_f
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
/* Double precision. */
/* Launches the double-precision matrix auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_cuda_d(int num_sources,
        const double4c* d_jones, double4c* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_cudak_d
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
/* Launches the double-precision scalar auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_scalar_cuda_d(int num_sources,
        const double2* d_jones, double2* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_scalar_cudak_d
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
#ifdef __cplusplus
}
#endif
| 02e0d79d8cf1921b466b6b078e7cf7363a2c5683.cu | /*
* Copyright (c) 2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "correlate/oskar_evaluate_auto_power_cuda.h"
#include "correlate/private_correlate_functions_inline.h"
/* Kernels. ================================================================ */
/* Single precision. */
/* Computes the per-source auto-power product J * J^H of 2x2 complex Jones
 * matrices (single precision). One thread handles one source; threads past
 * num_sources exit early. */
__global__
void oskar_evaluate_auto_power_cudak_f(const int num_sources,
    const float4c* restrict jones, float4c* restrict out)
{
    float4c val1, val2;
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_sources) return;

    /* Calculate auto-power product at the source. */
    OSKAR_LOAD_MATRIX(val1, jones, i);
    val2 = val1;
    oskar_multiply_complex_matrix_conjugate_transpose_in_place_f(&val1, &val2);

    /* Store result. */
    out[i] = val1;
}
/* Scalar auto-power per source (single precision):
 * out[i] = jones[i] * conj(jones[i]). One thread per source. */
__global__
void oskar_evaluate_auto_power_scalar_cudak_f(const int num_sources,
    const float2* restrict jones, float2* restrict out)
{
    const int s = blockDim.x * blockIdx.x + threadIdx.x;
    if (s < num_sources)
    {
        /* Multiply the source term by its own conjugate, in place. */
        float2 a = jones[s];
        float2 b = a;
        oskar_multiply_complex_conjugate_in_place_f(&a, &b);
        out[s] = a;
    }
}
/* Double precision. */
/* Computes the per-source auto-power product J * J^H of 2x2 complex Jones
 * matrices (double precision). One thread handles one source; threads past
 * num_sources exit early. */
__global__
void oskar_evaluate_auto_power_cudak_d(const int num_sources,
    const double4c* restrict jones, double4c* restrict out)
{
    double4c val1, val2;
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_sources) return;

    /* Calculate auto-power product at the source. */
    OSKAR_LOAD_MATRIX(val1, jones, i);
    val2 = val1;
    oskar_multiply_complex_matrix_conjugate_transpose_in_place_d(&val1, &val2);

    /* Store result. */
    out[i] = val1;
}
/* Scalar auto-power per source (double precision):
 * out[i] = jones[i] * conj(jones[i]). One thread per source. */
__global__
void oskar_evaluate_auto_power_scalar_cudak_d(const int num_sources,
    const double2* restrict jones, double2* restrict out)
{
    const int s = blockDim.x * blockIdx.x + threadIdx.x;
    if (s < num_sources)
    {
        /* Multiply the source term by its own conjugate, in place. */
        double2 a = jones[s];
        double2 b = a;
        oskar_multiply_complex_conjugate_in_place_d(&a, &b);
        out[s] = a;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
/* Launches the single-precision matrix auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_cuda_f(int num_sources,
        const float4c* d_jones, float4c* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_cudak_f
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
/* Launches the single-precision scalar auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_scalar_cuda_f(int num_sources,
        const float2* d_jones, float2* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_scalar_cudak_f
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
/* Double precision. */
/* Launches the double-precision matrix auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_cuda_d(int num_sources,
        const double4c* d_jones, double4c* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_cudak_d
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
/* Launches the double-precision scalar auto-power kernel,
 * 256 threads per block, ceil(num_sources / 256) blocks. */
void oskar_evaluate_auto_power_scalar_cuda_d(int num_sources,
        const double2* d_jones, double2* d_out)
{
    const int threads = 256;
    const int blocks = (num_sources + threads - 1) / threads;
    oskar_evaluate_auto_power_scalar_cudak_d
    OSKAR_CUDAK_CONF(blocks, threads) (num_sources, d_jones, d_out);
}
#ifdef __cplusplus
}
#endif
|
6a5030d4d6a34208816fabb3d27eaa82b424df16.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip"
#else
#include <ATen/MemoryOverlap.h>
#ifdef BUILD_NAMEDTENSOR
#include <ATen/NamedTensorUtils.h>
#endif
// Elementwise bitwise AND: self_ = src1 & src2. Integer tensor types only;
// errors out for half/float/double instantiations of this generic file.
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  return THError("cbitand is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self &= src2 (in place)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self = src1 & src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#endif
}
// Elementwise bitwise OR: self_ = src1 | src2. Integer tensor types only;
// errors out for half/float/double instantiations of this generic file.
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  return THError("cbitor is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self |= src2 (in place)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self = src1 | src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#endif
}
// Elementwise bitwise XOR: self_ = src1 ^ src2. Integer tensor types only;
// errors out for half/float/double instantiations of this generic file.
void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  // Fixed: this message previously read "cbitor", copy-pasted from the OR op.
  return THError("cbitxor is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self ^= src2 (in place)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self = src1 ^ src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#endif
}
// Elementwise sign function: self_ = sign(src), in place when self_ == src.
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
  if (self_ == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSignOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src);

    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSignOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#ifdef BUILD_NAMEDTENSOR
  at::namedinference::propagate_names(self_, src);
#endif
}
// Elementwise maximum: self = max(src1, src2); in place when self == src1.
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
// Elementwise minimum: self = min(src1, src2); in place when self == src1.
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
// Elementwise maximum with a scalar: self = max(src, value); in place when
// self == src.
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));

  if (self == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
// Elementwise minimum with a scalar: self = min(src, value); in place when
// self == src.
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));

  if (self == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
#if !defined(THC_REAL_IS_BOOL)
// Propagates dimension names from src to result, compiled to a no-op when
// named-tensor support is disabled at build time.
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
#ifdef BUILD_NAMEDTENSOR
  at::namedinference::propagate_names(result, src);
#endif
}
// Generates an elementwise unary math function THCTensor_(NAME) that applies
// CFUNC to every element (in place when self_ == src), plus the device
// functor it is driven by. REAL is pasted into the functor name so each
// real-type instantiation of this generic file gets a distinct struct.
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)             \
  struct Tensor_##NAME##_##REAL##_Op {                                      \
    __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
      *out = CFUNC(*in);                                                    \
    }                                                                       \
                                                                            \
    __device__ __forceinline__ void operator()(scalar_t* v) const {         \
      *v = CFUNC(*v);                                                       \
    }                                                                       \
  };                                                                        \
                                                                            \
  void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
    THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));           \
    at::assert_no_internal_overlap(self_);                                  \
    if (self_ == src) {                                                     \
      if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
        THArgCheck(false, 2, CUTORCH_DIM_WARNING);                          \
      }                                                                     \
    } else {                                                                \
      THCTensor_(resizeAs)(state, self_, src);                              \
                                                                            \
      if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
        THArgCheck(false, 2, CUTORCH_DIM_WARNING);                          \
      }                                                                     \
    }                                                                       \
                                                                            \
    THCudaCheck(hipGetLastError());                                         \
    propagate_names_if_named_tensor_enabled(self_, src);                    \
  }

#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
  IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)

// Floating-point-only unary functions; abs (below the #endif) is generated
// for every real type.
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)

IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  log, THCNumerics<scalar_t>::log,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  exp, THCNumerics<scalar_t>::exp,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  cos, THCNumerics<scalar_t>::cos,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  sin, THCNumerics<scalar_t>::sin,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<scalar_t>::ceil,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real)

IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  tan, THCNumerics<scalar_t>::tan,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  erf, THCNumerics<scalar_t>::erf,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<scalar_t>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  frac, THCNumerics<scalar_t>::frac,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  cinv, THCNumerics<scalar_t>::cinv,  Real)

#endif

IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  abs, THCNumerics<scalar_t>::abs,   Real)

#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
// Clamps every element of src into [min_value, max_value]; in place when
// self_ == src.
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
  scalar_t max_value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
  if (self_ == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src);

    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
}
// Cross product of x and y along `dimension` (expected to have size 3),
// written into self. Each tensor is narrowed to its first slice along the
// dimension; TensorCrossOp reaches the other two components through the
// original strides sx/sy/so.
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));

  int64_t sx = THCTensor_(stride)(state, x, dimension);
  int64_t sy = THCTensor_(stride)(state, y, dimension);
  int64_t so = THCTensor_(stride)(state, self, dimension);
  THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
  THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
  THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
  if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
  // Release the temporary narrowed views.
  THCTensor_(free)(state, nx);
  THCTensor_(free)(state, ny);
  THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
// Elementwise logistic sigmoid: self_ = sigmoid(src); in place when
// self_ == src. Floating-point types only (guarded by the enclosing #if).
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
  if (self_ == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src);

    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#ifdef BUILD_NAMEDTENSOR
  at::namedinference::propagate_names(self_, src);
#endif
}
// Elementwise digamma (derivative of lgamma): self_ = digamma(src).
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
  if (self_ != src) {
    THCTensor_(resizeAs)(state, self_, src);
  }
  // The same apply2 path handles the in-place case (self_ == src) too.
  if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }

  THCudaCheck(hipGetLastError());
#ifdef BUILD_NAMEDTENSOR
  at::namedinference::propagate_names(self_, src);
#endif
}
// polygamma: n-th derivative of the digamma family. Only n == 0 (digamma)
// and n == 1 (trigamma) are implemented; anything else raises.
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTrigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(hipGetLastError());
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(self_, src);
#endif
}
#endif
namespace {
// Bump the refcount and wrap the raw THCTensor in an owning intrusive_ptr so
// it can be handed to ATen as an at::Tensor without transferring ownership
// away from the caller.
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
// cadd: self = src1 + value * src2, delegated to at::add_out with alpha = value.
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
// at::add_out takes a Scalar; wrap the raw half value explicitly.
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
// csub: self = src1 - value * src2, delegated to at::sub_out with alpha = value.
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
// at::sub_out takes a Scalar; wrap the raw half value explicitly.
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
// cmul: element-wise product self = src1 * src2, delegated to at::mul_out.
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
  auto result = at::Tensor(retainTensorImpl(self_));
  auto lhs = at::Tensor(retainTensorImpl(src1));
  auto rhs = at::Tensor(retainTensorImpl(src2));
  at::mul_out(result, lhs, rhs);
}
// cpow: element-wise power with tensor exponents. In-place (self = self^src2)
// when self_ aliases src1, otherwise self = src1^src2 after resizing.
void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
// pow: self = src ^ value with a scalar exponent. Small integer exponents
// (1, 2, 3, and -1/-2 for floating types) dispatch to specialized
// TensorPowOp instantiations; the template argument -3 selects the generic
// pow() fallback. In-place when self_ aliases src.
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
// Negative exponents only make sense for floating-point element types.
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
// Out-of-place: same exponent dispatch, but src -> self via a 2-tensor apply.
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
// tpow: self = value ^ src element-wise (scalar base, tensor exponents),
// via TensorTPowOp. In-place when self_ aliases src.
void THCTensor_(tpow)(THCState *state, THCTensor *self_, scalar_t value, THCTensor *src)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
  if (self_ != src) {
    // Out-of-place: shape the destination, then map src -> self.
    THCTensor_(resizeAs)(state, self_, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTPowOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    // In-place: transform self directly.
    if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorTPowOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
  THCudaCheck(hipGetLastError());
}
// cdiv: element-wise quotient self = src1 / src2, delegated to at::div_out.
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
  auto result = at::Tensor(retainTensorImpl(self_));
  auto numerator = at::Tensor(retainTensorImpl(src1));
  auto denominator = at::Tensor(retainTensorImpl(src2));
  at::div_out(result, numerator, denominator);
}
// clshift: element-wise left shift. Not defined for half precision.
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self <<= src2 (in-place; original comment said "/=", a copy-paste from cdiv)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 << src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
// crshift: element-wise right shift. Not defined for half precision.
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self >>= src2 (in-place; original comment said "/=", a copy-paste from cdiv)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 >> src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
// cremainder: applies TensorCRemainderOp element-wise over src1, src2.
// In-place when self aliases src1, otherwise self is resized to src1 first.
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");
  const bool inPlace = (self == src1);
  bool ok;
  if (inPlace) {
    ok = THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>());
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    ok = THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>());
  }
  if (!ok) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
}
// cfmod: applies TensorCFmodOp element-wise over src1, src2.
// In-place when self aliases src1, otherwise out-of-place after resizing.
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
| 6a5030d4d6a34208816fabb3d27eaa82b424df16.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu"
#else
#include <ATen/MemoryOverlap.h>
#ifdef BUILD_NAMEDTENSOR
#include <ATen/NamedTensorUtils.h>
#endif
// cbitand: element-wise bitwise AND. Rejected for floating-point element types.
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self &= src2 (in-place; original comment said "/=", a copy-paste from cdiv)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 & src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
// cbitor: element-wise bitwise OR. Rejected for floating-point element types.
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self |= src2 (in-place; original comment said "/=", a copy-paste from cdiv)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 | src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
// cbitxor: element-wise bitwise XOR. Rejected for floating-point element types.
// In-place when self_ aliases src1, otherwise out-of-place after resizing.
void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  // BUGFIX: the message previously said "cbitor" (copy-paste from the
  // function above); report the actual function name.
  return THError("cbitxor is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");
  if (self_ == src1) {
    // self ^= src2
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 ^ src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
  THCudaCheck(cudaGetLastError());
#endif
}
// sign: applies TensorSignOp element-wise. In-place when self_ aliases src.
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSignOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSignOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(self_, src);
#endif
}
// cmax: element-wise maximum of src1 and src2 into self (TensorMaxOp).
// In-place when self aliases src1; otherwise self is resized to src1 first.
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");
  if (self != src1) {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
// cmin: element-wise minimum of src1 and src2 into self (TensorMinOp).
// In-place when self aliases src1; otherwise out-of-place after resizing.
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
// cmaxValue: applies TensorMaxValueOp(value) to each element of src into self.
// In-place when self aliases src.
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
// cminValue: applies TensorMinValueOp(value) to each element of src into self.
// In-place when self aliases src.
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
  const bool inPlace = (self == src);
  if (!inPlace) {
    THCTensor_(resizeAs)(state, self, src);
  }
  bool ok = inPlace
      ? THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))
      : THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value));
  if (!ok) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
}
#if !defined(THC_REAL_IS_BOOL)
// Forwards name inference to ATen when named tensors are compiled in;
// a no-op otherwise. Used by the basic-func macro below.
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(result, src);
#endif
}
// Generates an element-wise unary op: a device functor wrapping CFUNC plus a
// host function THCTensor_(NAME) that applies it in-place (self_ == src) or
// out-of-place (resizing self_ first), then checks for launch errors and
// propagates tensor names. NOTE: no comments inside the macro body — a `//`
// before a line-continuation backslash would swallow the continuation.
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
propagate_names_if_named_tensor_enabled(self_, src); \
}
// Indirection so NAME/REAL are macro-expanded before token pasting.
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
// Transcendental/rounding ops exist only for floating-point element types.
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<scalar_t>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<scalar_t>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<scalar_t>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real)
#endif
// abs is defined for all (non-bool) element types.
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
// clamp: applies TensorClampOp(min_value, max_value) element-wise.
// In-place when self_ aliases src, otherwise out-of-place after resizing.
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
scalar_t max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
// Cross product along `dimension`: narrows all three tensors to one-element
// slices and lets TensorCrossOp walk the remaining components via the strides.
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
// Strides along the cross dimension, consumed by TensorCrossOp.
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
// sigmoid: applies TensorSigmoidOp element-wise (floating-point types only).
// In-place when self_ aliases src, otherwise out-of-place after resizing.
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(self_, src);
#endif
}
// digamma: applies TensorDigammaOp element-wise (accumulates in accreal).
// The 2-tensor apply is used even when self_ == src.
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(self_, src);
#endif
}
// polygamma: only n == 0 (digamma) and n == 1 (trigamma) are implemented.
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTrigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(cudaGetLastError());
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(self_, src);
#endif
}
#endif
namespace {
// Bump the refcount and wrap the raw THCTensor in an owning intrusive_ptr so
// it can be handed to ATen as an at::Tensor without stealing the caller's ref.
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
// cadd: self = src1 + value * src2, delegated to at::add_out with alpha = value.
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
// at::add_out takes a Scalar; wrap the raw half value explicitly.
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
// csub: self = src1 - value * src2, delegated to at::sub_out with alpha = value.
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
// at::sub_out takes a Scalar; wrap the raw half value explicitly.
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
// cmul: element-wise product self = src1 * src2, delegated to at::mul_out.
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
// cpow: element-wise power with tensor exponents. In-place (self = self^src2)
// when self_ aliases src1, otherwise self = src1^src2 after resizing.
void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
// pow: self = src ^ value with a scalar exponent. Small integer exponents
// (1, 2, 3, and -1/-2 for floating types) dispatch to specialized
// TensorPowOp instantiations; template argument -3 selects the generic
// pow() fallback. In-place when self_ aliases src.
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
// Negative exponents only make sense for floating-point element types.
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
// Out-of-place: same exponent dispatch, but src -> self via a 2-tensor apply.
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
// tpow: self = value ^ src element-wise (scalar base, tensor exponents)
// via TensorTPowOp. In-place when self_ aliases src.
void THCTensor_(tpow)(THCState *state, THCTensor *self_, scalar_t value, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorTPowOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTPowOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
// cdiv: element-wise quotient self = src1 / src2, delegated to at::div_out.
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
// clshift: element-wise left shift. Not defined for half precision.
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self <<= src2 (in-place; original comment said "/=", a copy-paste from cdiv)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 << src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
// crshift: element-wise right shift. Not defined for half precision.
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self >>= src2 (in-place; original comment said "/=", a copy-paste from cdiv)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 >> src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
// cremainder: applies TensorCRemainderOp element-wise over src1, src2.
// In-place when self aliases src1, otherwise out-of-place after resizing.
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
// cfmod: applies TensorCFmodOp element-wise over src1, src2.
// In-place when self aliases src1, otherwise out-of-place after resizing.
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
|
27b71e54ff711e02bd7b9a3d328196a6aa18219c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_calculate_force_square_max.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations swept by the benchmark: {blockDim.x, blockDim.y}.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem sizes swept by the benchmark: {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks gpu_calculate_force_square_max over a sweep of block shapes and
// problem sizes, printing [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per run.
// argv[1]: number of entries of matrices_ to benchmark.
int main(int argc, char **argv) {
  // Robustness: the original read argv[1] unconditionally (UB when absent).
  if (argc < 2) {
    cerr << "usage: " << argv[0] << " <matrix_len>" << endl;
    return 1;
  }
  hipSetDevice(0);
  char *p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      const int size = 1;
      const int number_of_rounds = 1;
      // BUGFIX: the original allocated XSIZE*YSIZE *bytes* for buffers of
      // doubles, under-allocating by a factor of sizeof(double) and letting
      // the kernel write out of bounds.
      double *force_per_atom = NULL;
      hipMalloc(&force_per_atom, (size_t)XSIZE * YSIZE * sizeof(double));
      double *force_square_max = NULL;
      hipMalloc(&force_square_max, (size_t)XSIZE * YSIZE * sizeof(double));
      // Round the iteration space up to a multiple of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);  // force context creation before timing
      // Warm-up: one launch plus a sync, then 10 more launches.
      hipLaunchKernelGGL((gpu_calculate_force_square_max), dim3(gridBlock), dim3(threadBlock), 0, 0, size, number_of_rounds, force_per_atom, force_square_max);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((gpu_calculate_force_square_max), dim3(gridBlock), dim3(threadBlock), 0, 0, size, number_of_rounds, force_per_atom, force_square_max);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((gpu_calculate_force_square_max), dim3(gridBlock), dim3(threadBlock), 0, 0, size, number_of_rounds, force_per_atom, force_square_max);
      }
      // BUGFIX: kernel launches are asynchronous; without a sync the timer
      // measured only enqueue overhead, not kernel execution.
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUGFIX: the original leaked both device buffers on every iteration.
      hipFree(force_per_atom);
      hipFree(force_square_max);
    }
  }
  return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_calculate_force_square_max.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated benchmark driver: sweeps matrix sizes x block shapes and
 * times 1000 launches of gpu_calculate_force_square_max per combination.
 * Output: one line "[elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per config.
 */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Robustness fix: argv[1] (number of matrix configurations to sweep)
    // was dereferenced without checking argc.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            // Problem size and launch shape for this configuration.
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int size = 1;
            const int number_of_rounds = 1;
            // BUG FIX: the generated code allocated XSIZE*YSIZE *bytes*; the
            // buffers hold doubles, so scale by sizeof(double).
            const double *force_per_atom = NULL;
            cudaMalloc(&force_per_atom, XSIZE * YSIZE * sizeof(double));
            double *force_square_max = NULL;
            cudaMalloc(&force_square_max, XSIZE * YSIZE * sizeof(double));
            // Round the problem size up to the next multiple of the block dims.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // forces lazy context creation before timing
            // Cold-start launch.
            gpu_calculate_force_square_max<<<gridBlock,threadBlock>>>(size,number_of_rounds,force_per_atom,force_square_max);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gpu_calculate_force_square_max<<<gridBlock,threadBlock>>>(size,number_of_rounds,force_per_atom,force_square_max);
            }
            // Timed region: 1000 asynchronous launches.
            // NOTE(review): no cudaDeviceSynchronize() before 'end' -- this
            // measures launch overhead, not kernel execution time.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gpu_calculate_force_square_max<<<gridBlock,threadBlock>>>(size,number_of_rounds,force_per_atom,force_square_max);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: both device buffers previously leaked on every
            // iteration; release them before the next configuration.
            cudaFree((void*)force_per_atom);
            cudaFree(force_square_max);
        }
    }
}
3536685252c56700e2992369b209ea2c473876a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
@author Mark Gates
@author Azzam Haidar
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
static __device__
void zlacpy_full_device(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Each thread owns one row of a BLK_X x BLK_Y tile and copies up to
    // BLK_Y entries of that row from dA into dB (column-major storage).
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;
    if ( row >= m )
        return;  // this thread's row lies outside the matrix
    // Advance both pointers to this thread's first element.
    const magmaDoubleComplex *src = dA + row + col0*ldda;
    magmaDoubleComplex       *dst = dB + row + col0*lddb;
    if ( col0 + BLK_Y <= n ) {
        // Interior tile: full block-column, fixed trip count -> unrollable.
        #pragma unroll
        for( int j = 0; j < BLK_Y; ++j )
            dst[j*lddb] = src[j*ldda];
    }
    else {
        // Right-edge tile: partial block-column.
        for( int j = 0; j < BLK_Y && col0 + j < n; ++j )
            dst[j*lddb] = src[j*ldda];
    }
}
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset.
*/
static __device__
void zlacpy_lower_device(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Copies only the lower triangle/trapezoid (diagonal included) of dA to
    // dB. One thread per row of a BLK_X x BLK_Y tile; column-major storage.
    int ind = blockIdx.x*BLK_X + threadIdx.x;
    int iby = blockIdx.y*BLK_Y;
    /* check if full block-column && (below diag) */
    bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
    /* do only rows inside matrix, and blocks not above diag */
    if ( ind < m && ind + BLK_X > iby ) {
        dA += ind + iby*ldda;
        dB += ind + iby*lddb;
        if ( full ) {
            // full block-column, off-diagonal block
            #pragma unroll
            for( int j=0; j < BLK_Y; ++j ) {
                dB[j*lddb] = dA[j*ldda];
            }
        }
        else {
            // either partial block-column or diagonal block:
            // the extra ind >= iby+j test keeps the copy at/below the diagonal
            for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
                dB[j*lddb] = dA[j*ldda];
            }
        }
    }
}
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset.
*/
static __device__
void zlacpy_upper_device(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Copies only the upper triangle/trapezoid (diagonal included) of dA to
    // dB. One thread per row of a BLK_X x BLK_Y tile; column-major storage.
    int ind = blockIdx.x*BLK_X + threadIdx.x;
    int iby = blockIdx.y*BLK_Y;
    /* check if full block-column && (above diag) */
    bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
    /* do only rows inside matrix, and blocks not below diag */
    if ( ind < m && ind < iby + BLK_Y ) {
        dA += ind + iby*ldda;
        dB += ind + iby*lddb;
        if ( full ) {
            // full block-column, off-diagonal block
            #pragma unroll
            for( int j=0; j < BLK_Y; ++j ) {
                dB[j*lddb] = dA[j*ldda];
            }
        }
        else {
            // either partial block-column or diagonal block:
            // per-element ind <= iby+j test keeps the copy at/above the diagonal
            for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
                if ( ind <= iby+j ) {
                    dB[j*lddb] = dA[j*ldda];
                }
            }
        }
    }
}
/*
kernel wrapper to call the device function.
*/
// Thin __global__ entry point; all copy logic lives in zlacpy_full_device.
__global__
void zlacpy_full_kernel(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    zlacpy_full_device(m, n, dA, ldda, dB, lddb);
}
// Thin __global__ entry point; all copy logic lives in zlacpy_lower_device.
__global__
void zlacpy_lower_kernel(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    zlacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
// Thin __global__ entry point; all copy logic lives in zlacpy_upper_device.
__global__
void zlacpy_upper_kernel(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    zlacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
/*
kernel wrapper to call the device function for the batched routine.
*/
// Batched variant: grid.z selects the matrix within the batch; the per-matrix
// copy is delegated to zlacpy_full_device.
__global__
void zlacpy_full_kernel_batched(
    int m, int n,
    magmaDoubleComplex const * const *dAarray, int ldda,
    magmaDoubleComplex **dBarray, int lddb )
{
    int batchid = blockIdx.z;
    zlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
// Batched variant: grid.z selects the matrix; delegates to zlacpy_lower_device.
__global__
void zlacpy_lower_kernel_batched(
    int m, int n,
    magmaDoubleComplex const * const *dAarray, int ldda,
    magmaDoubleComplex **dBarray, int lddb )
{
    int batchid = blockIdx.z;
    zlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
// Batched variant: grid.z selects the matrix; delegates to zlacpy_upper_device.
__global__
void zlacpy_upper_kernel_batched(
    int m, int n,
    magmaDoubleComplex const * const *dAarray, int ldda,
    magmaDoubleComplex **dBarray, int lddb )
{
    int batchid = blockIdx.z;
    zlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
/**
Purpose
-------
ZLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as ZLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_q(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // Validate arguments; info = -i flags the i-th argument as invalid.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -2;
    else if ( n < 0 )
        info = -3;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Empty matrix: nothing to copy.
    if ( m == 0 || n == 0 )
        return;
    // BLK_X threads per block; one grid cell per BLK_X x BLK_Y tile.
    dim3 threads( BLK_X, 1 );
    dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y );
    // Dispatch on uplo; any value other than Lower/Upper copies the full matrix.
    if ( uplo == MagmaLower ) {
        hipLaunchKernelGGL(( zlacpy_lower_kernel), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dB, lddb );
    }
    else if ( uplo == MagmaUpper ) {
        hipLaunchKernelGGL(( zlacpy_upper_kernel), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dB, lddb );
    }
    else {
        hipLaunchKernelGGL(( zlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dB, lddb );
    }
}
/**
@see magmablas_zlacpy_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb )
{
    // Convenience wrapper: same as magmablas_zlacpy_q on the default MAGMA stream.
    magmablas_zlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
/**
Purpose
-------
ZLACPY_BATCHED_Q copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
This is the same as ZLACPY_BATCHED, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX_16* array, dimension (batchCount)
array of pointers to the matrices dA, where each dA is of dimension (LDDA,N)
The m by n matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX_16* array, dimension (batchCount)
array of pointers to the matrices dB, where each dB is of dimension (LDDB,N)
The m by n matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_batched_q(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
    magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount, magma_queue_t queue )
{
    // Validate arguments; info = -i flags the i-th argument as invalid.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -2;
    else if ( n < 0 )
        info = -3;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    else if ( batchCount < 0 )
        info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Nothing to do for empty matrices or an empty batch.
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;
    // 2D tile grid per matrix; grid.z indexes the batch.
    dim3 threads( BLK_X, 1, 1 );
    dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y, batchCount );
    if ( uplo == MagmaLower ) {
        hipLaunchKernelGGL(( zlacpy_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
    }
    else if ( uplo == MagmaUpper ) {
        hipLaunchKernelGGL(( zlacpy_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
    }
    else {
        hipLaunchKernelGGL(( zlacpy_full_kernel_batched) , dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
    }
}
/**
@see magmablas_zlacpy_batched_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_batched(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
    magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount )
{
    // Convenience wrapper: batched copy on the default MAGMA stream.
    magmablas_zlacpy_batched_q( uplo, m, n, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
| 3536685252c56700e2992369b209ea2c473876a9.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
@author Mark Gates
@author Azzam Haidar
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
static __device__
void zlacpy_full_device(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Each thread owns one row of a BLK_X x BLK_Y tile and copies up to
    // BLK_Y entries of that row from dA into dB (column-major storage).
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;
    if ( row >= m )
        return;  // this thread's row lies outside the matrix
    // Advance both pointers to this thread's first element.
    const magmaDoubleComplex *src = dA + row + col0*ldda;
    magmaDoubleComplex       *dst = dB + row + col0*lddb;
    if ( col0 + BLK_Y <= n ) {
        // Interior tile: full block-column, fixed trip count -> unrollable.
        #pragma unroll
        for( int j = 0; j < BLK_Y; ++j )
            dst[j*lddb] = src[j*ldda];
    }
    else {
        // Right-edge tile: partial block-column.
        for( int j = 0; j < BLK_Y && col0 + j < n; ++j )
            dst[j*lddb] = src[j*ldda];
    }
}
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset.
*/
static __device__
void zlacpy_lower_device(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Copies the lower triangle/trapezoid (diagonal included) of dA to dB.
    // One thread per row of a BLK_X x BLK_Y tile; column-major storage.
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;
    // Skip rows past the bottom, and threads whose tile lies entirely
    // above the diagonal.
    if ( row >= m || row + BLK_X <= col0 )
        return;
    const magmaDoubleComplex *src = dA + row + col0*ldda;
    magmaDoubleComplex       *dst = dB + row + col0*lddb;
    if ( col0 + BLK_Y <= n && row >= col0 + BLK_Y ) {
        // Strictly-below-diagonal, full block-column: unrolled copy.
        #pragma unroll
        for( int j = 0; j < BLK_Y; ++j )
            dst[j*lddb] = src[j*ldda];
    }
    else {
        // Diagonal or right-edge tile: copy only entries with row >= column.
        for( int j = 0; j < BLK_Y && col0 + j < n && row >= col0 + j; ++j )
            dst[j*lddb] = src[j*ldda];
    }
}
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset.
*/
static __device__
void zlacpy_upper_device(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Copies the upper triangle/trapezoid (diagonal included) of dA to dB.
    // One thread per row of a BLK_X x BLK_Y tile; column-major storage.
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;
    // Skip rows past the bottom, and threads below every column of the tile.
    if ( row >= m || row >= col0 + BLK_Y )
        return;
    const magmaDoubleComplex *src = dA + row + col0*ldda;
    magmaDoubleComplex       *dst = dB + row + col0*lddb;
    if ( col0 + BLK_Y <= n && row + BLK_X <= col0 ) {
        // Strictly-above-diagonal, full block-column: unrolled copy.
        #pragma unroll
        for( int j = 0; j < BLK_Y; ++j )
            dst[j*lddb] = src[j*ldda];
    }
    else {
        // Diagonal or right-edge tile: copy only entries with row <= column.
        for( int j = 0; j < BLK_Y && col0 + j < n; ++j ) {
            if ( row <= col0 + j )
                dst[j*lddb] = src[j*ldda];
        }
    }
}
/*
kernel wrapper to call the device function.
*/
// Thin __global__ entry point; all copy logic lives in zlacpy_full_device.
__global__
void zlacpy_full_kernel(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    zlacpy_full_device(m, n, dA, ldda, dB, lddb);
}
// Thin __global__ entry point; all copy logic lives in zlacpy_lower_device.
__global__
void zlacpy_lower_kernel(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    zlacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
// Thin __global__ entry point; all copy logic lives in zlacpy_upper_device.
__global__
void zlacpy_upper_kernel(
    int m, int n,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    zlacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
/*
kernel wrapper to call the device function for the batched routine.
*/
// Batched variant: grid.z selects the matrix within the batch; the per-matrix
// copy is delegated to zlacpy_full_device.
__global__
void zlacpy_full_kernel_batched(
    int m, int n,
    magmaDoubleComplex const * const *dAarray, int ldda,
    magmaDoubleComplex **dBarray, int lddb )
{
    int batchid = blockIdx.z;
    zlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
// Batched variant: grid.z selects the matrix; delegates to zlacpy_lower_device.
__global__
void zlacpy_lower_kernel_batched(
    int m, int n,
    magmaDoubleComplex const * const *dAarray, int ldda,
    magmaDoubleComplex **dBarray, int lddb )
{
    int batchid = blockIdx.z;
    zlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
// Batched variant: grid.z selects the matrix; delegates to zlacpy_upper_device.
__global__
void zlacpy_upper_kernel_batched(
    int m, int n,
    magmaDoubleComplex const * const *dAarray, int ldda,
    magmaDoubleComplex **dBarray, int lddb )
{
    int batchid = blockIdx.z;
    zlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
/**
Purpose
-------
ZLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as ZLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_q(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // Validate arguments; info = -i flags the i-th argument as invalid.
    magma_int_t info = 0;
    if      ( m < 0 )           info = -2;
    else if ( n < 0 )           info = -3;
    else if ( ldda < max(1,m) ) info = -5;
    else if ( lddb < max(1,m) ) info = -7;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Empty matrix: nothing to copy.
    if ( m == 0 || n == 0 )
        return;
    // BLK_X threads per block; one grid cell per BLK_X x BLK_Y tile.
    dim3 threads( BLK_X, 1 );
    dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y );
    // Dispatch on uplo; any value other than Lower/Upper copies the full matrix.
    if ( uplo == MagmaLower )
        zlacpy_lower_kernel<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dB, lddb );
    else if ( uplo == MagmaUpper )
        zlacpy_upper_kernel<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dB, lddb );
    else
        zlacpy_full_kernel<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dB, lddb );
}
/**
@see magmablas_zlacpy_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb )
{
    // Convenience wrapper: same as magmablas_zlacpy_q on the default MAGMA stream.
    magmablas_zlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
/**
Purpose
-------
ZLACPY_BATCHED_Q copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
This is the same as ZLACPY_BATCHED, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX_16* array, dimension (batchCount)
array of pointers to the matrices dA, where each dA is of dimension (LDDA,N)
The m by n matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX_16* array, dimension (batchCount)
array of pointers to the matrices dB, where each dB is of dimension (LDDB,N)
The m by n matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_batched_q(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
    magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount, magma_queue_t queue )
{
    // Validate arguments; info = -i flags the i-th argument as invalid.
    magma_int_t info = 0;
    if      ( m < 0 )            info = -2;
    else if ( n < 0 )            info = -3;
    else if ( ldda < max(1,m) )  info = -5;
    else if ( lddb < max(1,m) )  info = -7;
    else if ( batchCount < 0 )   info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Nothing to do for empty matrices or an empty batch.
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;
    // 2D tile grid per matrix; grid.z indexes the batch.
    dim3 threads( BLK_X, 1, 1 );
    dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y, batchCount );
    if ( uplo == MagmaLower )
        zlacpy_lower_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
    else if ( uplo == MagmaUpper )
        zlacpy_upper_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
    else
        zlacpy_full_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
}
/**
@see magmablas_zlacpy_batched_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_batched(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
    magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount )
{
    // Convenience wrapper: batched copy on the default MAGMA stream.
    magmablas_zlacpy_batched_q( uplo, m, n, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
|
libcu.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
extern "C" {
#include "libcu.h"
#define BLOCKSIZE 512
#define WARPSIZE 32
#define NCHUNKS 20
#define SATURATION_SCALE 7.0f
// Report a HIP runtime error with its source location; optionally terminate
// the process with the error code (default). Intended for the gpuErrchk macro.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;  // nothing to report
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// One thread per test source: scan that source's moveouts over its restricted
// set of stations and store the (min, max) pair into moveouts_mm[idx*2 + 0/1].
void __global__ moveouts_kernel(int* moveouts, int *moveouts_mm,
                                int *test_sources, int *station_indexes,
                                int n_test,
                                int n_stations_whole_array,
                                int n_stations_restricted_array) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int s, ss, max_moveout, min_moveout;
    // BUG FIX: moveout was declared float although moveouts holds ints,
    // losing precision for values above 2^24 samples.
    int moveout = 0;
    if(idx >= n_test) return;
    max_moveout = 0;
    // BUG FIX: was 1000000 with a "shoud be MAX_INT" comment; use INT_MAX so
    // the minimum is correct even for moveouts larger than 1e6 samples.
    min_moveout = 2147483647; // INT_MAX
    for (s = 0; s < n_stations_restricted_array; s++) {
        // map from the whole stations array to the restricted array
        ss = station_indexes[test_sources[idx] * n_stations_restricted_array + s];
        // NOTE(review): moveouts is indexed with the whole-array stride here,
        // but allocated/copied with the restricted-array stride in
        // network_response() -- confirm the intended layout.
        moveout = moveouts[test_sources[idx] * n_stations_whole_array + ss];
        if (moveout > max_moveout) {
            max_moveout = moveout;
        }
        if (moveout < min_moveout) {
            min_moveout = moveout;
        }
    }
    moveouts_mm[idx * 2 + 0] = min_moveout;
    moveouts_mm[idx * 2 + 1] = max_moveout;
}
// One thread per time sample: for every candidate source, shift the N/E traces
// by the per-station S moveouts, stack them, and keep the source with the
// largest stack. Writes the max stack to nw_response[idx] and the winning
// source index to biggest_idx[idx] (-1 if no source produced a positive stack).
void __global__ stack_S_kernel(float* tracesN,
                               float* tracesE,
                               int* moveouts,
                               int* moveouts_minmax,
                               int* station_indexes,
                               int* test_sources,
                               float* nw_response,
                               int* biggest_idx,
                               int n_samples,
                               int n_test,
                               int n_stations_whole_array,
                               int n_stations_restricted_array) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float network_response_max, sum_beamform;
    int i, s, ss, source_index_whole_array, source_index_restricted_array, network_resp_idx_max, shift, moveout;
    float *tracesN_, *tracesE_; // local pointers
    if(idx >= n_samples){
        return;
    }
    network_response_max = 0.0f;
    // BUG FIX: network_resp_idx_max was left uninitialized; when no source beat
    // the 0.0 threshold (e.g. all shifts negative), garbage was written to
    // biggest_idx. Initialize to the explicit "no source" sentinel -1.
    network_resp_idx_max = -1;
    for (i = 0; i < n_test; i++) {
        source_index_whole_array = test_sources[i] * n_stations_whole_array; // position on the moveouts vector
        source_index_restricted_array = test_sources[i] * n_stations_restricted_array; // position on the station indexes vector
        sum_beamform = 0.0f;
        shift = idx - moveouts_minmax[i * 2 + 0]; // position on the time axis
        if (shift < 0) continue; // don't do anything before time 0
        // local pointers, pre-shifted on the time axis
        tracesN_ = tracesN + shift;
        tracesE_ = tracesE + shift;
        for (s = 0; s < n_stations_restricted_array; s++) {
            // map from the restricted array (closest stations) to the whole array of stations
            ss = station_indexes[source_index_restricted_array + s];
            moveout = moveouts[source_index_whole_array + ss];
            if(shift + moveout < n_samples){
                // stack the two horizontal components (read-only cache loads)
                sum_beamform += __ldg(&(tracesN_[ss * n_samples + moveout])) + \
                                __ldg(&(tracesE_[ss * n_samples + moveout]));
            }
        }
        if (sum_beamform > network_response_max) {
            network_response_max = sum_beamform;
            network_resp_idx_max = test_sources[i];
        }
    }
    nw_response[idx] = network_response_max;
    biggest_idx[idx] = network_resp_idx_max;
}
// One thread per time sample: for every candidate source, stack the horizontal
// traces at the S moveouts (weighted x2) plus the vertical traces at the P
// moveouts, and keep the source with the largest combined stack. Writes the
// max stack to nw_response[idx] and the winning source index to
// biggest_idx[idx] (-1 if every shift fell before time 0).
void __global__ stack_SP_kernel(float* tracesH,
                                float* tracesZ,
                                int* moveouts_P,
                                int* moveouts_S,
                                int* moveouts_minmax,
                                int* station_indexes,
                                int* test_sources,
                                float* nw_response,
                                int* biggest_idx,
                                int n_samples,
                                int n_samples_thread,
                                int n_test,
                                int n_stations_whole_array,
                                int n_stations_restricted_array) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float network_response_max, sum_beamform_SP, sum_beamform_S, sum_beamform_P;
    int i, s, ss, source_index_whole_array, source_index_restricted_array, network_resp_idx_max, shift, moveout_P, moveout_S;
    float traceH_, traceZ_; // samples to stack
    if(idx >= n_samples_thread){
        return;
    }
    network_response_max = -INFINITY;
    // BUG FIX: network_resp_idx_max was left uninitialized; if every shift was
    // negative (or n_test == 0), garbage was written to biggest_idx.
    network_resp_idx_max = -1;
    for (i = 0; i < n_test; i++) {
        source_index_whole_array = test_sources[i] * n_stations_whole_array; // position on the moveouts vector
        source_index_restricted_array = test_sources[i] * n_stations_restricted_array; // position on the station indexes vector
        sum_beamform_SP = 0.0f;
        sum_beamform_S = 0.0f;
        sum_beamform_P = 0.0f;
        shift = idx - moveouts_minmax[i * 2 + 0]; // position on the time axis
        if (shift < 0) continue; // don't do anything before time 0
        for (s = 0; s < n_stations_restricted_array; s++) {
            // map from the closest stations to the whole array of stations
            ss = station_indexes[source_index_restricted_array + s];
            moveout_P = moveouts_P[source_index_whole_array + ss];
            moveout_S = moveouts_S[source_index_whole_array + ss];
            // NOTE(review): only shift + moveout_S is bounds-checked; this is
            // safe only if moveout_P <= moveout_S for every station -- confirm.
            if(shift + moveout_S < n_samples){
                traceH_ = __ldg(&(tracesH[ss * n_samples + shift + moveout_S]));
                traceZ_ = __ldg(&(tracesZ[ss * n_samples + shift + moveout_P]));
                // IDIOM FIX: 2.0f instead of the double literal 2. avoids a
                // double-precision round trip (2*x is exact in both types).
                sum_beamform_S += 2.0f * traceH_;
                sum_beamform_P += traceZ_;
            }
        }
        sum_beamform_SP = sum_beamform_S + sum_beamform_P;
        if (sum_beamform_SP > network_response_max) {
            network_response_max = sum_beamform_SP;
            network_resp_idx_max = test_sources[i];
        }
    }
    nw_response[idx] = network_response_max;
    biggest_idx[idx] = network_resp_idx_max;
}
// Host driver: copies traces/moveouts to the device, computes per-source
// moveout min/max, runs the S-phase stacking kernel over all samples, and
// copies the resulting network response and best-source indexes back.
void network_response(int* test_points,
                      float* tracesN,
                      float* tracesE,
                      int* moveouts,
                      int* st_idx,
                      int n_test,
                      int n_samples,
                      float* network_response,
                      int* biggest_idx,
                      int n_stations_whole_array,
                      int n_stations_restricted_array,
                      int n_sources) {
    // cuda device vars
    int *t_sources_d;
    float *traces_N_d;
    float *traces_E_d;
    int *moveouts_d;
    int *st_idx_d;
    float *nw_response_d;
    int *biggest_idx_d;
    int *moveouts_minmax_d;
    int ngpus = 0;
    int GRID_SIZE = 1024; // threads per block for both kernel launches
    hipError_t cuda_result;
    // check how many devices are available
    hipGetDeviceCount(&ngpus);
    printf("%d cuda devices found on the node\n", ngpus);
    /*for (int n=0; n<ngpus; n++) {
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, n);
    printf("Device nb %i, name %i \n", n, prop.name);
    printf("Total memory: %i \n", prop.totalGlobalMem);
    printf("Shared memory per block: %i \n", prop.sharedMemPerBlock);
    }*/
    size_t free, total;
    printf("\n");
    hipMemGetInfo(&free,&total);
    // NOTE(review): free/total are size_t but printed with %d -- should be %zu.
    printf("%d KB free of total %d KB\n",free/1024,total/1024);
    // allocate memory on device
    cuda_result = hipMalloc((void**)&t_sources_d, n_test * sizeof(int));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    cuda_result = hipMalloc((void**)&traces_N_d, n_samples * n_stations_whole_array * sizeof(float));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    cuda_result = hipMalloc((void**)&traces_E_d, n_samples * n_stations_whole_array * sizeof(float));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    // NOTE(review): moveouts_d is sized with the restricted-array stride, but
    // moveouts_kernel/stack_S_kernel index it with the whole-array stride
    // (test_sources[i] * n_stations_whole_array + ss) -- confirm the layout.
    cuda_result = hipMalloc((void**)&moveouts_d, n_sources * n_stations_restricted_array * sizeof(int));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    cuda_result = hipMalloc((void**)&st_idx_d, n_sources * n_stations_restricted_array * sizeof(int));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    cuda_result = hipMalloc((void**)&nw_response_d, n_samples * sizeof(float));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    cuda_result = hipMalloc((void**)&biggest_idx_d, n_samples * sizeof(int));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    cuda_result = hipMalloc((void**)&moveouts_minmax_d, 2 * n_test * sizeof(int));
    if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
    // transfer from CPU to GPU
    cuda_result = hipMemcpy(t_sources_d, test_points, n_test * sizeof(int),
                            hipMemcpyHostToDevice);
    if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
    cuda_result = hipMemcpy(traces_N_d, tracesN, n_samples * n_stations_whole_array * sizeof(float),
                            hipMemcpyHostToDevice);
    if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
    cuda_result = hipMemcpy(traces_E_d, tracesE, n_samples * n_stations_whole_array * sizeof(float),
                            hipMemcpyHostToDevice);
    if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
    cuda_result = hipMemcpy(moveouts_d, moveouts, n_sources * n_stations_restricted_array * sizeof(int),
                            hipMemcpyHostToDevice);
    if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
    cuda_result = hipMemcpy(st_idx_d, st_idx, n_sources * n_stations_restricted_array * sizeof(int),
                            hipMemcpyHostToDevice);
    if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
    hipMemGetInfo(&free,&total);
    printf("%d KB free of total %d KB\n",free/1024,total/1024);
    //hipFuncSetCacheConfig("stack_kernel", hipFuncCachePreferL1);
    // Pre-compute per-source (min, max) moveouts used by the stacking kernel.
    hipLaunchKernelGGL(( moveouts_kernel), dim3(ceil(n_test/(float)GRID_SIZE)),dim3(GRID_SIZE), 0, 0, moveouts_d,
                       moveouts_minmax_d,
                       t_sources_d,
                       st_idx_d,
                       n_test,
                       n_stations_whole_array,
                       n_stations_restricted_array);
    //printf("Number of calls: %.2f \n", ceil(n_samples/(float)GRID_SIZE));
    // One thread per time sample; scans all test sources per sample.
    hipLaunchKernelGGL(( stack_S_kernel), dim3(ceil(n_samples/(float)GRID_SIZE)),dim3(GRID_SIZE), 0, 0, traces_N_d,
                       traces_E_d,
                       moveouts_d,
                       moveouts_minmax_d,
                       st_idx_d,
                       t_sources_d,
                       nw_response_d,
                       biggest_idx_d,
                       n_samples,
                       n_test,
                       n_stations_whole_array,
                       n_stations_restricted_array);
    // Surface launch/runtime errors before reading results back.
    gpuErrchk( hipPeekAtLastError() );
    gpuErrchk( hipDeviceSynchronize() );
    cuda_result = hipMemcpy(network_response, nw_response_d,n_samples * sizeof(float),
                            hipMemcpyDeviceToHost);
    if (cuda_result != hipSuccess) printf("Problem with the transfer of memory !\n");
    cuda_result = hipMemcpy(biggest_idx, biggest_idx_d, n_samples * sizeof(int),
                            hipMemcpyDeviceToHost);
    if (cuda_result != hipSuccess) printf("Problem with the transfer of memory !\n");
    // Release all device buffers.
    hipFree(t_sources_d);
    hipFree(traces_N_d);
    hipFree(traces_E_d);
    hipFree(moveouts_d);
    hipFree(st_idx_d);
    hipFree(nw_response_d);
    hipFree(biggest_idx_d);
    hipFree(moveouts_minmax_d);
}
void network_response_SP(int* test_points,
float* traces_H,
float* traces_Z,
int* moveouts_P,
int* moveouts_S,
int* st_idx,
int n_test,
int n_samples,
float* network_response,
int* biggest_idx,
int n_stations_whole_array,
int n_stations_restricted_array,
int n_sources) {
int nGPUs = 0;
// check how many devices are available
hipGetDeviceCount(&nGPUs);
omp_set_num_threads(nGPUs);
printf("%d cuda devices found on the node\n", nGPUs);
size_t free, total;
printf("\n");
hipMemGetInfo(&free,&total);
printf("%d KB free of total %d KB\n",free/1024,total/1024);
#pragma omp parallel shared(test_points, traces_H, traces_Z, moveouts_P, moveouts_S, st_idx, \
network_response, biggest_idx)
{
int id = omp_get_thread_num();
hipSetDevice(id);
dim3 BS(512);
int n_samples_thread = n_samples/nGPUs;
int device_shift = id*n_samples_thread;
if (id == nGPUs){
n_samples_thread += n_samples - (n_samples/nGPUs)*nGPUs;
}
// cuda device vars
int *t_sources_d;
float *traces_H_d;
float *traces_Z_d;
int *moveouts_P_d;
int *moveouts_S_d;
int *st_idx_d;
float *nw_response_d;
int *biggest_idx_d;
int *moveouts_minmax_d;
hipError_t cuda_result;
// allocate memory on device
cuda_result = hipMalloc((void**)&t_sources_d, n_test * sizeof(int));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&traces_H_d, n_samples * n_stations_whole_array * sizeof(float));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&traces_Z_d, n_samples * n_stations_whole_array * sizeof(float));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&moveouts_S_d, n_sources * n_stations_whole_array * sizeof(int));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&moveouts_P_d, n_sources * n_stations_whole_array * sizeof(int));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&st_idx_d, n_sources * n_stations_restricted_array * sizeof(int));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&nw_response_d, n_samples_thread * sizeof(float));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&biggest_idx_d, n_samples_thread * sizeof(int));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = hipMalloc((void**)&moveouts_minmax_d, 2 * n_test * sizeof(int));
if (cuda_result != hipSuccess) printf("Problem with the allocation of memory !\n");
// transfer from CPU to GPU
cuda_result = hipMemcpy(t_sources_d, test_points, n_test * sizeof(int),
hipMemcpyHostToDevice);
if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = hipMemcpy(traces_H_d, traces_H, n_samples * n_stations_whole_array * sizeof(float),
hipMemcpyHostToDevice);
if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = hipMemcpy(traces_Z_d, traces_Z, n_samples * n_stations_whole_array * sizeof(float),
hipMemcpyHostToDevice);
if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = hipMemcpy(moveouts_S_d, moveouts_S, n_sources * n_stations_whole_array * sizeof(int),
hipMemcpyHostToDevice);
if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = hipMemcpy(moveouts_P_d, moveouts_P, n_sources * n_stations_whole_array * sizeof(int),
hipMemcpyHostToDevice);
if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = hipMemcpy(st_idx_d, st_idx, n_sources * n_stations_restricted_array * sizeof(int),
hipMemcpyHostToDevice);
if (cuda_result != hipSuccess) printf("Problem when transfering memory from CPU to GPU \n");
hipMemGetInfo(&free,&total);
printf("%d KB free of total %d KB\n",free/1024,total/1024);
//hipFuncSetCacheConfig("stack_kernel", hipFuncCachePreferL1);
dim3 GS1(ceil((float)n_test/(float)BS.x) + 1);
hipLaunchKernelGGL(( moveouts_kernel), dim3(GS1), dim3(BS), 0, 0, moveouts_P_d,
moveouts_minmax_d,
t_sources_d,
st_idx_d,
n_test,
n_stations_whole_array,
n_stations_restricted_array);
// return an error if something happened in the kernel (and crash the program)
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipDeviceSynchronize();
dim3 GS2(ceil((float)n_samples_thread/(float)BS.x) + 1);
//printf("Number of calls: %.2f \n", ceil(n_samples/(float)GRID_SIZE));
hipLaunchKernelGGL(( stack_SP_kernel), dim3(GS2), dim3(BS), 0, 0, traces_H_d + device_shift,
traces_Z_d + device_shift,
moveouts_P_d,
moveouts_S_d,
moveouts_minmax_d,
st_idx_d,
t_sources_d,
nw_response_d,
biggest_idx_d,
n_samples,
n_samples_thread,
n_test,
n_stations_whole_array,
n_stations_restricted_array);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipDeviceSynchronize();
cuda_result = hipMemcpy(network_response + device_shift, nw_response_d, n_samples_thread * sizeof(float),
hipMemcpyDeviceToHost);
if (cuda_result != hipSuccess) printf("Problem with the transfer of memory !\n");
cuda_result = hipMemcpy(biggest_idx + device_shift, biggest_idx_d, n_samples_thread * sizeof(int),
hipMemcpyDeviceToHost);
if (cuda_result != hipSuccess) printf("Problem with the transfer of memory !\n");
hipDeviceSynchronize();
hipFree(t_sources_d);
hipFree(traces_H_d);
hipFree(traces_Z_d);
hipFree(moveouts_S_d);
hipFree(moveouts_P_d);
hipFree(st_idx_d);
hipFree(nw_response_d);
hipFree(biggest_idx_d);
hipFree(moveouts_minmax_d);
}
}
} // extern C
| libcu.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
extern "C" {
#include "libcu.h"
#define BLOCKSIZE 512
#define WARPSIZE 32
#define NCHUNKS 20
#define SATURATION_SCALE 7.0f
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void __global__ moveouts_kernel(int* moveouts, int *moveouts_mm,
int *test_sources, int *station_indexes,
int n_test,
int n_stations_whole_array,
int n_stations_restricted_array) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int s, ss, max_moveout, min_moveout;
float moveout = 0.;
if(idx >= n_test) return;
max_moveout=0;
min_moveout=1000000; // shoud be MAX_INT
for (s = 0; s < n_stations_restricted_array; s++) {
// map from the whole stations array to the restricted array
ss = station_indexes[test_sources[idx] * n_stations_restricted_array + s];
moveout = moveouts[test_sources[idx] * n_stations_whole_array + ss];
if (moveout > max_moveout) {
max_moveout = moveout;
}
if (moveout < min_moveout) {
min_moveout = moveout;
}
}
moveouts_mm[idx * 2 + 0] = min_moveout;
moveouts_mm[idx * 2 + 1] = max_moveout;
}
void __global__ stack_S_kernel(float* tracesN,
float* tracesE,
int* moveouts,
int* moveouts_minmax,
int* station_indexes,
int* test_sources,
float* nw_response,
int* biggest_idx,
int n_samples,
int n_test,
int n_stations_whole_array,
int n_stations_restricted_array) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float network_response_max, sum_beamform;
int i, s, ss, source_index_whole_array, source_index_restricted_array, network_resp_idx_max, shift, moveout;
float *tracesN_, *tracesE_; // local pointers
if(idx >= n_samples){
return;
}
network_response_max = 0.0;
for (i = 0; i < n_test; i++) {
source_index_whole_array = test_sources[i] * n_stations_whole_array; // position on the moveouts vector
source_index_restricted_array = test_sources[i] * n_stations_restricted_array; // position on the station indexes vector
sum_beamform = 0.0;
shift = idx - moveouts_minmax[i * 2 + 0]; // position on the time axis
if (shift < 0) continue; // don't do anything before time 0
// ----------------------------------------------------------
// define the local pointers
tracesN_ = tracesN + shift;
tracesE_ = tracesE + shift;
// ----------------------------------------------------------
for (s = 0; s < n_stations_restricted_array; s++) {
// map from the restricted array (closest stations) to the whole array of stations
ss = station_indexes[source_index_restricted_array + s];
moveout = moveouts[source_index_whole_array + ss];
if(shift + moveout < n_samples){
// rotate the traces to get the transverse component and stack them
sum_beamform += __ldg(&(tracesN_[ss * n_samples + moveout])) + \
__ldg(&(tracesE_[ss * n_samples + moveout]));
}
}
if (sum_beamform > network_response_max) {
network_response_max = sum_beamform;
network_resp_idx_max = test_sources[i];
}
}
nw_response[idx] = network_response_max;
biggest_idx[idx] = network_resp_idx_max;
}
void __global__ stack_SP_kernel(float* tracesH,
float* tracesZ,
int* moveouts_P,
int* moveouts_S,
int* moveouts_minmax,
int* station_indexes,
int* test_sources,
float* nw_response,
int* biggest_idx,
int n_samples,
int n_samples_thread,
int n_test,
int n_stations_whole_array,
int n_stations_restricted_array) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float network_response_max, sum_beamform_SP, sum_beamform_S, sum_beamform_P;
int i, s, ss, source_index_whole_array, source_index_restricted_array, network_resp_idx_max, shift, moveout_P, moveout_S;
float traceH_, traceZ_; // samples to stack
if(idx >= n_samples_thread){
return;
}
network_response_max = -INFINITY;
for (i = 0; i < n_test; i++) {
source_index_whole_array = test_sources[i] * n_stations_whole_array; // position on the moveouts vector
source_index_restricted_array = test_sources[i] * n_stations_restricted_array; // position on the station indexes vector
sum_beamform_SP = 0.0;
sum_beamform_S = 0.0;
sum_beamform_P = 0.0;
shift = idx - moveouts_minmax[i * 2 + 0]; // position on the time axis
if (shift < 0) continue; // don't do anything before time 0
for (s = 0; s < n_stations_restricted_array; s++) {
// map from the closest stations to the whole array of stations
ss = station_indexes[source_index_restricted_array + s];
moveout_P = moveouts_P[source_index_whole_array + ss];
moveout_S = moveouts_S[source_index_whole_array + ss];
if(shift + moveout_S < n_samples){
// !!! shift + moveout can still be > n_samples
traceH_ = __ldg(&(tracesH[ss * n_samples + shift + moveout_S]));
traceZ_ = __ldg(&(tracesZ[ss * n_samples + shift + moveout_P]));
sum_beamform_S += 2. * traceH_;
sum_beamform_P += traceZ_;
}
}
sum_beamform_SP = sum_beamform_S + sum_beamform_P;
if (sum_beamform_SP > network_response_max) {
network_response_max = sum_beamform_SP;
network_resp_idx_max = test_sources[i];
}
}
nw_response[idx] = network_response_max;
biggest_idx[idx] = network_resp_idx_max;
}
void network_response(int* test_points,
float* tracesN,
float* tracesE,
int* moveouts,
int* st_idx,
int n_test,
int n_samples,
float* network_response,
int* biggest_idx,
int n_stations_whole_array,
int n_stations_restricted_array,
int n_sources) {
// cuda device vars
int *t_sources_d;
float *traces_N_d;
float *traces_E_d;
int *moveouts_d;
int *st_idx_d;
float *nw_response_d;
int *biggest_idx_d;
int *moveouts_minmax_d;
int ngpus = 0;
int GRID_SIZE = 1024;
cudaError_t cuda_result;
// check how many devices are available
cudaGetDeviceCount(&ngpus);
printf("%d cuda devices found on the node\n", ngpus);
/*for (int n=0; n<ngpus; n++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, n);
printf("Device nb %i, name %i \n", n, prop.name);
printf("Total memory: %i \n", prop.totalGlobalMem);
printf("Shared memory per block: %i \n", prop.sharedMemPerBlock);
}*/
size_t free, total;
printf("\n");
cudaMemGetInfo(&free,&total);
printf("%d KB free of total %d KB\n",free/1024,total/1024);
// allocate memory on device
cuda_result = cudaMalloc((void**)&t_sources_d, n_test * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&traces_N_d, n_samples * n_stations_whole_array * sizeof(float));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&traces_E_d, n_samples * n_stations_whole_array * sizeof(float));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&moveouts_d, n_sources * n_stations_restricted_array * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&st_idx_d, n_sources * n_stations_restricted_array * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&nw_response_d, n_samples * sizeof(float));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&biggest_idx_d, n_samples * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&moveouts_minmax_d, 2 * n_test * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
// transfer from CPU to GPU
cuda_result = cudaMemcpy(t_sources_d, test_points, n_test * sizeof(int),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(traces_N_d, tracesN, n_samples * n_stations_whole_array * sizeof(float),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(traces_E_d, tracesE, n_samples * n_stations_whole_array * sizeof(float),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(moveouts_d, moveouts, n_sources * n_stations_restricted_array * sizeof(int),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(st_idx_d, st_idx, n_sources * n_stations_restricted_array * sizeof(int),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cudaMemGetInfo(&free,&total);
printf("%d KB free of total %d KB\n",free/1024,total/1024);
//cudaFuncSetCacheConfig("stack_kernel", cudaFuncCachePreferL1);
moveouts_kernel<<<ceil(n_test/(float)GRID_SIZE),GRID_SIZE>>>(moveouts_d,
moveouts_minmax_d,
t_sources_d,
st_idx_d,
n_test,
n_stations_whole_array,
n_stations_restricted_array);
//printf("Number of calls: %.2f \n", ceil(n_samples/(float)GRID_SIZE));
stack_S_kernel<<<ceil(n_samples/(float)GRID_SIZE),GRID_SIZE>>>(traces_N_d,
traces_E_d,
moveouts_d,
moveouts_minmax_d,
st_idx_d,
t_sources_d,
nw_response_d,
biggest_idx_d,
n_samples,
n_test,
n_stations_whole_array,
n_stations_restricted_array);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cuda_result = cudaMemcpy(network_response, nw_response_d,n_samples * sizeof(float),
cudaMemcpyDeviceToHost);
if (cuda_result != cudaSuccess) printf("Problem with the transfer of memory !\n");
cuda_result = cudaMemcpy(biggest_idx, biggest_idx_d, n_samples * sizeof(int),
cudaMemcpyDeviceToHost);
if (cuda_result != cudaSuccess) printf("Problem with the transfer of memory !\n");
cudaFree(t_sources_d);
cudaFree(traces_N_d);
cudaFree(traces_E_d);
cudaFree(moveouts_d);
cudaFree(st_idx_d);
cudaFree(nw_response_d);
cudaFree(biggest_idx_d);
cudaFree(moveouts_minmax_d);
}
void network_response_SP(int* test_points,
float* traces_H,
float* traces_Z,
int* moveouts_P,
int* moveouts_S,
int* st_idx,
int n_test,
int n_samples,
float* network_response,
int* biggest_idx,
int n_stations_whole_array,
int n_stations_restricted_array,
int n_sources) {
int nGPUs = 0;
// check how many devices are available
cudaGetDeviceCount(&nGPUs);
omp_set_num_threads(nGPUs);
printf("%d cuda devices found on the node\n", nGPUs);
size_t free, total;
printf("\n");
cudaMemGetInfo(&free,&total);
printf("%d KB free of total %d KB\n",free/1024,total/1024);
#pragma omp parallel shared(test_points, traces_H, traces_Z, moveouts_P, moveouts_S, st_idx, \
network_response, biggest_idx)
{
int id = omp_get_thread_num();
cudaSetDevice(id);
dim3 BS(512);
int n_samples_thread = n_samples/nGPUs;
int device_shift = id*n_samples_thread;
if (id == nGPUs){
n_samples_thread += n_samples - (n_samples/nGPUs)*nGPUs;
}
// cuda device vars
int *t_sources_d;
float *traces_H_d;
float *traces_Z_d;
int *moveouts_P_d;
int *moveouts_S_d;
int *st_idx_d;
float *nw_response_d;
int *biggest_idx_d;
int *moveouts_minmax_d;
cudaError_t cuda_result;
// allocate memory on device
cuda_result = cudaMalloc((void**)&t_sources_d, n_test * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&traces_H_d, n_samples * n_stations_whole_array * sizeof(float));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&traces_Z_d, n_samples * n_stations_whole_array * sizeof(float));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&moveouts_S_d, n_sources * n_stations_whole_array * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&moveouts_P_d, n_sources * n_stations_whole_array * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&st_idx_d, n_sources * n_stations_restricted_array * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&nw_response_d, n_samples_thread * sizeof(float));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&biggest_idx_d, n_samples_thread * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
cuda_result = cudaMalloc((void**)&moveouts_minmax_d, 2 * n_test * sizeof(int));
if (cuda_result != cudaSuccess) printf("Problem with the allocation of memory !\n");
// transfer from CPU to GPU
cuda_result = cudaMemcpy(t_sources_d, test_points, n_test * sizeof(int),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(traces_H_d, traces_H, n_samples * n_stations_whole_array * sizeof(float),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(traces_Z_d, traces_Z, n_samples * n_stations_whole_array * sizeof(float),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(moveouts_S_d, moveouts_S, n_sources * n_stations_whole_array * sizeof(int),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(moveouts_P_d, moveouts_P, n_sources * n_stations_whole_array * sizeof(int),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cuda_result = cudaMemcpy(st_idx_d, st_idx, n_sources * n_stations_restricted_array * sizeof(int),
cudaMemcpyHostToDevice);
if (cuda_result != cudaSuccess) printf("Problem when transfering memory from CPU to GPU \n");
cudaMemGetInfo(&free,&total);
printf("%d KB free of total %d KB\n",free/1024,total/1024);
//cudaFuncSetCacheConfig("stack_kernel", cudaFuncCachePreferL1);
dim3 GS1(ceil((float)n_test/(float)BS.x) + 1);
moveouts_kernel<<<GS1, BS>>>(moveouts_P_d,
moveouts_minmax_d,
t_sources_d,
st_idx_d,
n_test,
n_stations_whole_array,
n_stations_restricted_array);
// return an error if something happened in the kernel (and crash the program)
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
cudaDeviceSynchronize();
dim3 GS2(ceil((float)n_samples_thread/(float)BS.x) + 1);
//printf("Number of calls: %.2f \n", ceil(n_samples/(float)GRID_SIZE));
stack_SP_kernel<<<GS2, BS>>>(traces_H_d + device_shift,
traces_Z_d + device_shift,
moveouts_P_d,
moveouts_S_d,
moveouts_minmax_d,
st_idx_d,
t_sources_d,
nw_response_d,
biggest_idx_d,
n_samples,
n_samples_thread,
n_test,
n_stations_whole_array,
n_stations_restricted_array);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaDeviceSynchronize();
cuda_result = cudaMemcpy(network_response + device_shift, nw_response_d, n_samples_thread * sizeof(float),
cudaMemcpyDeviceToHost);
if (cuda_result != cudaSuccess) printf("Problem with the transfer of memory !\n");
cuda_result = cudaMemcpy(biggest_idx + device_shift, biggest_idx_d, n_samples_thread * sizeof(int),
cudaMemcpyDeviceToHost);
if (cuda_result != cudaSuccess) printf("Problem with the transfer of memory !\n");
cudaDeviceSynchronize();
cudaFree(t_sources_d);
cudaFree(traces_H_d);
cudaFree(traces_Z_d);
cudaFree(moveouts_S_d);
cudaFree(moveouts_P_d);
cudaFree(st_idx_d);
cudaFree(nw_response_d);
cudaFree(biggest_idx_d);
cudaFree(moveouts_minmax_d);
}
}
} // extern C
|
1f2df3d318548e4e54c573c3132a5581ea113704.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 1f2df3d318548e4e54c573c3132a5581ea113704.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
5f84c98c3f0e78e60e61afee44afe0c633143cc7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/matrix/kernelparams.h>
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <cmath>
#include <cuml/cuml.hpp>
#include <cuml/svm/svc.hpp>
#include <cuml/svm/svr.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace SVM {
template <typename D>
struct SvrParams {
DatasetParams data;
RegressionParams regression;
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
class SVR : public RegressionFixture<D> {
public:
SVR(const std::string& name, const SvrParams<D>& p)
: RegressionFixture<D>(name, p.data, p.regression),
kernel(p.kernel),
model(p.model),
svm_param(p.svm_param) {
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[kernel.kernel] << p.data;
this->SetName(oss.str().c_str());
}
protected:
void runBenchmark(::benchmark::State& state) override {
if (this->params.rowMajor) {
state.SkipWithError("SVR only supports col-major inputs");
}
if (this->svm_param.svmType != ML::SVM::EPSILON_SVR) {
state.SkipWithError("SVR currently only supports EPSILON_SVR");
}
this->loopOnState(state, [this]() {
ML::SVM::svrFit(*this->handle, this->data.X, this->params.nrows,
this->params.ncols, this->data.y, this->svm_param,
this->kernel, this->model);
CUDA_CHECK(hipStreamSynchronize(this->stream));
ML::SVM::svmFreeBuffers(*this->handle, this->model);
});
}
private:
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
std::vector<SvrParams<D>> getInputs() {
struct Triplets {
int nrows, ncols, n_informative;
};
std::vector<SvrParams<D>> out;
SvrParams<D> p;
p.data.rowMajor = false;
p.regression.shuffle = true; // better to shuffle when n_informative < ncols
p.regression.seed = 1378ULL;
p.regression.effective_rank = -1; // dataset generation will be faster
p.regression.bias = 0;
p.regression.tail_strength = 0.5; // unused when effective_rank = -1
p.regression.noise = 1;
// svmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity,
// epsilon, svmType})
p.svm_param = ML::SVM::svmParameter{
1, 200, 200, 100, 1e-3, CUML_LEVEL_INFO, 0.1, ML::SVM::EPSILON_SVR};
p.model =
ML::SVM::svmModel<D>{0, 0, 0, nullptr, nullptr, nullptr, 0, nullptr};
std::vector<Triplets> rowcols = {
{50000, 2, 2}, {1024, 10000, 10}, {3000, 200, 200}};
std::vector<MLCommon::Matrix::KernelParams> kernels{
MLCommon::Matrix::KernelParams{MLCommon::Matrix::LINEAR, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::POLYNOMIAL, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::RBF, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::TANH, 3, 0.1, 0}};
for (auto& rc : rowcols) {
p.data.nrows = rc.nrows;
p.data.ncols = rc.ncols;
p.regression.n_informative = rc.n_informative;
// Limit the number of iterations for large tests
p.svm_param.max_iter = (rc.nrows > 10000) ? 50 : 200;
for (auto kernel : kernels) {
p.kernel = kernel;
p.kernel.gamma = 1.0 / rc.ncols;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(SvrParams<float>, SVR<float>, "regression",
getInputs<float>());
ML_BENCH_REGISTER(SvrParams<double>, SVR<double>, "regression",
getInputs<double>());
} // namespace SVM
} // namespace Bench
} // end namespace ML
| 5f84c98c3f0e78e60e61afee44afe0c633143cc7.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/matrix/kernelparams.h>
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <cmath>
#include <cuml/cuml.hpp>
#include <cuml/svm/svc.hpp>
#include <cuml/svm/svr.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace SVM {
template <typename D>
struct SvrParams {
DatasetParams data;
RegressionParams regression;
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
class SVR : public RegressionFixture<D> {
public:
SVR(const std::string& name, const SvrParams<D>& p)
: RegressionFixture<D>(name, p.data, p.regression),
kernel(p.kernel),
model(p.model),
svm_param(p.svm_param) {
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[kernel.kernel] << p.data;
this->SetName(oss.str().c_str());
}
protected:
void runBenchmark(::benchmark::State& state) override {
if (this->params.rowMajor) {
state.SkipWithError("SVR only supports col-major inputs");
}
if (this->svm_param.svmType != ML::SVM::EPSILON_SVR) {
state.SkipWithError("SVR currently only supports EPSILON_SVR");
}
this->loopOnState(state, [this]() {
ML::SVM::svrFit(*this->handle, this->data.X, this->params.nrows,
this->params.ncols, this->data.y, this->svm_param,
this->kernel, this->model);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
ML::SVM::svmFreeBuffers(*this->handle, this->model);
});
}
private:
MLCommon::Matrix::KernelParams kernel;
ML::SVM::svmParameter svm_param;
ML::SVM::svmModel<D> model;
};
template <typename D>
std::vector<SvrParams<D>> getInputs() {
struct Triplets {
int nrows, ncols, n_informative;
};
std::vector<SvrParams<D>> out;
SvrParams<D> p;
p.data.rowMajor = false;
p.regression.shuffle = true; // better to shuffle when n_informative < ncols
p.regression.seed = 1378ULL;
p.regression.effective_rank = -1; // dataset generation will be faster
p.regression.bias = 0;
p.regression.tail_strength = 0.5; // unused when effective_rank = -1
p.regression.noise = 1;
// svmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity,
// epsilon, svmType})
p.svm_param = ML::SVM::svmParameter{
1, 200, 200, 100, 1e-3, CUML_LEVEL_INFO, 0.1, ML::SVM::EPSILON_SVR};
p.model =
ML::SVM::svmModel<D>{0, 0, 0, nullptr, nullptr, nullptr, 0, nullptr};
std::vector<Triplets> rowcols = {
{50000, 2, 2}, {1024, 10000, 10}, {3000, 200, 200}};
std::vector<MLCommon::Matrix::KernelParams> kernels{
MLCommon::Matrix::KernelParams{MLCommon::Matrix::LINEAR, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::POLYNOMIAL, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::RBF, 3, 1, 0},
MLCommon::Matrix::KernelParams{MLCommon::Matrix::TANH, 3, 0.1, 0}};
for (auto& rc : rowcols) {
p.data.nrows = rc.nrows;
p.data.ncols = rc.ncols;
p.regression.n_informative = rc.n_informative;
// Limit the number of iterations for large tests
p.svm_param.max_iter = (rc.nrows > 10000) ? 50 : 200;
for (auto kernel : kernels) {
p.kernel = kernel;
p.kernel.gamma = 1.0 / rc.ncols;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(SvrParams<float>, SVR<float>, "regression",
getInputs<float>());
ML_BENCH_REGISTER(SvrParams<double>, SVR<double>, "regression",
getInputs<double>());
} // namespace SVM
} // namespace Bench
} // end namespace ML
|
18e2a30045262ebc24e637b3e98e5b5178414272.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// matrix A is in column major format
#define idxColumnMajor(idxrow,idxcol,nrow,ncol) idxcol*nrow+idxrow
// matrix B is in row major format
#define idxRowMajor(idxrow,idxcol,nrow,ncol) idxrow*ncol+idxcol
#define TILE_SIZE_A 32
#define TILE_SIZE_B 8
// Compute C = A * B
__global__ void matrixMultiply_kernel(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
//@@ You have to perform register tiling for this MP
__shared__ float sB_tile[TILE_SIZE_B];
float C_tile[TILE_SIZE_B] = {};
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// thread coarsening: for each input matrix element of A (thread), we TILE_SIZE_B inner products
int idxRow = by * TILE_SIZE_A + ty;
// shared memory tiling of input matrix B
int idxCol = bx * TILE_SIZE_B + tx;
// memory coalescing on both A and B (since A is transposed)
if (idxRow<numARows && idxCol<numBColumns){
for(int k=0;k<numBRows;k++){
// load tile of B into gpu shared memory
if (tx<TILE_SIZE_B){
int idxB = idxRowMajor(k,idxCol,numBRows,numBColumns);
sB_tile[tx] = B[idxB];
}
__syncthreads();
//perform matrix mult with B shared tile and save it into local register array for C
int idxA = idxColumnMajor(idxRow,k,numARows,numAColumns);
float A_element = A[idxA]; // store in register
// loop over tile of B which is in shared memory
// this is thread coarsening on
for (int idxB=0; idxB<TILE_SIZE_B; idxB++){
C_tile[idxB] += A_element*sB_tile[idxB];
}
__syncthreads();
}
// save changes of local output tile into global C matrix
for (int idxTile=0; idxTile<TILE_SIZE_B; idxTile++){
idxCol = bx * TILE_SIZE_B + idxTile;
// make sure not to exceed number of col. in B otherwise we
// will start writing into the next row unintentionally
if (idxCol<numBColumns){
int idxC = idxRowMajor(idxRow,idxCol,numCRows,numCColumns);
C[idxC] = C_tile[idxTile];
}
}
}
}
static void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to launch matrix multiplication
dim3 grid((numBColumns-1)/TILE_SIZE_B + 1, (numARows-1)/TILE_SIZE_A + 1, 1);
dim3 block(TILE_SIZE_B, TILE_SIZE_A, 1);
hipLaunchKernelGGL(( matrixMultiply_kernel) , dim3(grid), dim3(block) , 0, 0,
A,B,C,
numARows,numAColumns,
numBRows,numBColumns,
numCRows,numCColumns);
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numAColumns,
&numARows);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
numCRows = numARows;
numCColumns = numBColumns;
hostC = (float *)malloc(sizeof(float) * numCRows * numCColumns);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
hipMalloc((void **)&deviceA, sizeof(float) * numARows * numAColumns);
hipMalloc((void **)&deviceB, sizeof(float) * numBRows * numBColumns);
hipMalloc((void **)&deviceC, sizeof(float) * numCRows * numCColumns);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
hipMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns,
hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns,
hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
matrixMultiply(deviceA, deviceB, deviceC, numARows, numAColumns,
numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns,
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
| 18e2a30045262ebc24e637b3e98e5b5178414272.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// matrix A is in column major format
#define idxColumnMajor(idxrow,idxcol,nrow,ncol) idxcol*nrow+idxrow
// matrix B is in row major format
#define idxRowMajor(idxrow,idxcol,nrow,ncol) idxrow*ncol+idxcol
#define TILE_SIZE_A 32
#define TILE_SIZE_B 8
// Compute C = A * B
__global__ void matrixMultiply_kernel(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
//@@ You have to perform register tiling for this MP
__shared__ float sB_tile[TILE_SIZE_B];
float C_tile[TILE_SIZE_B] = {};
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// thread coarsening: for each input matrix element of A (thread), we TILE_SIZE_B inner products
int idxRow = by * TILE_SIZE_A + ty;
// shared memory tiling of input matrix B
int idxCol = bx * TILE_SIZE_B + tx;
// memory coalescing on both A and B (since A is transposed)
if (idxRow<numARows && idxCol<numBColumns){
for(int k=0;k<numBRows;k++){
// load tile of B into gpu shared memory
if (tx<TILE_SIZE_B){
int idxB = idxRowMajor(k,idxCol,numBRows,numBColumns);
sB_tile[tx] = B[idxB];
}
__syncthreads();
//perform matrix mult with B shared tile and save it into local register array for C
int idxA = idxColumnMajor(idxRow,k,numARows,numAColumns);
float A_element = A[idxA]; // store in register
// loop over tile of B which is in shared memory
// this is thread coarsening on
for (int idxB=0; idxB<TILE_SIZE_B; idxB++){
C_tile[idxB] += A_element*sB_tile[idxB];
}
__syncthreads();
}
// save changes of local output tile into global C matrix
for (int idxTile=0; idxTile<TILE_SIZE_B; idxTile++){
idxCol = bx * TILE_SIZE_B + idxTile;
// make sure not to exceed number of col. in B otherwise we
// will start writing into the next row unintentionally
if (idxCol<numBColumns){
int idxC = idxRowMajor(idxRow,idxCol,numCRows,numCColumns);
C[idxC] = C_tile[idxTile];
}
}
}
}
static void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to launch matrix multiplication
dim3 grid((numBColumns-1)/TILE_SIZE_B + 1, (numARows-1)/TILE_SIZE_A + 1, 1);
dim3 block(TILE_SIZE_B, TILE_SIZE_A, 1);
matrixMultiply_kernel <<< grid, block >>>
(A,B,C,
numARows,numAColumns,
numBRows,numBColumns,
numCRows,numCColumns);
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numAColumns,
&numARows);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
numCRows = numARows;
numCColumns = numBColumns;
hostC = (float *)malloc(sizeof(float) * numCRows * numCColumns);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
cudaMalloc((void **)&deviceA, sizeof(float) * numARows * numAColumns);
cudaMalloc((void **)&deviceB, sizeof(float) * numBRows * numBColumns);
cudaMalloc((void **)&deviceC, sizeof(float) * numCRows * numCColumns);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns,
cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
matrixMultiply(deviceA, deviceB, deviceC, numARows, numAColumns,
numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns,
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
42fb36230dd9f144416bc96e77c50f587880fd1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Timer.hpp"
#include <assert.h>
#include <iostream>
#include <vector>
#undef NDEBUG
// Sequential norm, for validation
float norm(const std::vector<float>& v) {
float sum = 0.0;
for (size_t i = 0; i < v.size(); ++i){
sum += v[i] * v[i];
}
return std::sqrt(sum);
}
template <class T>
__global__ void
noblock_sq(unsigned int array_size, T *x, T *y){
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i<array_size) y[i] = x[i] * x[i];
}
__global__ void
sq(float *g_idata, float *g_odata){
extern __shared__ float sdata[];
size_t tid = threadIdx.x;
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = (g_idata[i] * g_idata[i]);
__syncthreads();
for (size_t s = 1; s<blockDim.x; s*=2){
size_t index = 2*s*tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
if (tid == 0){
g_odata[blockIdx.x] = sdata[0];
}
}
int main(int argc, char* argv[]) {
size_t exponent = 27;
size_t num_trips = 1;
if (argc >= 2) exponent = std::stol(argv[1]);
if (argc >= 3) num_trips = std::stol(argv[2]);
const size_t N = 1 << exponent;
int block_size = 256;
int num_blocks = (N + block_size - 1) / block_size;
float *x = nullptr, *y = nullptr;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, num_blocks*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = i;
}
std::vector<float> sequential_vector(x, x+N);
for (int i = 0; i < N; i++)
{
assert(x[i] == sequential_vector[i]);
}
DEF_TIMER(cuda_norm);
START_TIMER(cuda_norm);
hipDeviceSynchronize();
for (size_t i = 0; i < num_trips; ++i) {
hipLaunchKernelGGL(( sq), dim3(num_blocks), dim3(block_size), block_size*sizeof(float), 0, x, y);
hipDeviceSynchronize();
}
/* write me: final step, copy out values from y and add on cpu */
float result = 0.0;
for (size_t i = 0; i < num_blocks; ++i)
{
result += y[i];
}
result = std::sqrt(result);
double cuda_time = STOP_TIMER_QUIETLY(cuda_norm);
std::cout << exponent << "\t" << num_trips << "\t" << cuda_time << "\t" << result << std::endl;
// this WILL fail for exponents above 8 or so. Floating point error is a bitch.
assert(norm(sequential_vector) == result);
hipFree(x); hipFree(y);
return 0;
}
| 42fb36230dd9f144416bc96e77c50f587880fd1b.cu | #include "Timer.hpp"
#include <assert.h>
#include <iostream>
#include <vector>
#undef NDEBUG
// Sequential norm, for validation
float norm(const std::vector<float>& v) {
float sum = 0.0;
for (size_t i = 0; i < v.size(); ++i){
sum += v[i] * v[i];
}
return std::sqrt(sum);
}
template <class T>
__global__ void
noblock_sq(unsigned int array_size, T *x, T *y){
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i<array_size) y[i] = x[i] * x[i];
}
__global__ void
sq(float *g_idata, float *g_odata){
extern __shared__ float sdata[];
size_t tid = threadIdx.x;
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = (g_idata[i] * g_idata[i]);
__syncthreads();
for (size_t s = 1; s<blockDim.x; s*=2){
size_t index = 2*s*tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
if (tid == 0){
g_odata[blockIdx.x] = sdata[0];
}
}
int main(int argc, char* argv[]) {
size_t exponent = 27;
size_t num_trips = 1;
if (argc >= 2) exponent = std::stol(argv[1]);
if (argc >= 3) num_trips = std::stol(argv[2]);
const size_t N = 1 << exponent;
int block_size = 256;
int num_blocks = (N + block_size - 1) / block_size;
float *x = nullptr, *y = nullptr;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, num_blocks*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = i;
}
std::vector<float> sequential_vector(x, x+N);
for (int i = 0; i < N; i++)
{
assert(x[i] == sequential_vector[i]);
}
DEF_TIMER(cuda_norm);
START_TIMER(cuda_norm);
cudaDeviceSynchronize();
for (size_t i = 0; i < num_trips; ++i) {
sq<<<num_blocks, block_size, block_size*sizeof(float)>>>(x, y);
cudaDeviceSynchronize();
}
/* write me: final step, copy out values from y and add on cpu */
float result = 0.0;
for (size_t i = 0; i < num_blocks; ++i)
{
result += y[i];
}
result = std::sqrt(result);
double cuda_time = STOP_TIMER_QUIETLY(cuda_norm);
std::cout << exponent << "\t" << num_trips << "\t" << cuda_time << "\t" << result << std::endl;
// this WILL fail for exponents above 8 or so. Floating point error is a bitch.
assert(norm(sequential_vector) == result);
cudaFree(x); cudaFree(y);
return 0;
}
|
870caea9bf73b2d5c9e3ce288e407f2c5dbbaf8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_back;
int xdim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_back;
int ydim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_back;
int xdim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_back;
int ydim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_xvel_plus_2_back*(y)+xdim0_update_halo_kernel2_xvel_plus_2_back*ydim0_update_halo_kernel2_xvel_plus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_xvel_plus_2_back*(y)+xdim1_update_halo_kernel2_xvel_plus_2_back*ydim1_update_halo_kernel2_xvel_plus_2_back*(z))
//user function
__device__
inline void update_halo_kernel2_xvel_plus_2_back_gpu(double *xvel0, double *xvel1, const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0,0,0)] = xvel0[OPS_ACC0(0,0,2)];
if(fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0,0,0)] = xvel1[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_xvel_plus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel2_xvel_plus_2_back * ydim0_update_halo_kernel2_xvel_plus_2_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_xvel_plus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel2_xvel_plus_2_back * ydim1_update_halo_kernel2_xvel_plus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,32)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
OPS_kernels[32].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_back_h || ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_back_h || xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_back_h || ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_back_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_xvel_plus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_xvel_plus_2_back_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_xvel_plus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_xvel_plus_2_back_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_xvel_plus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_xvel_plus_2_back_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_xvel_plus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_xvel_plus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[32].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 32;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 32;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
| 870caea9bf73b2d5c9e3ce288e407f2c5dbbaf8d.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_back;
int xdim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_back;
int ydim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_back;
int xdim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_back;
int ydim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_xvel_plus_2_back*(y)+xdim0_update_halo_kernel2_xvel_plus_2_back*ydim0_update_halo_kernel2_xvel_plus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_xvel_plus_2_back*(y)+xdim1_update_halo_kernel2_xvel_plus_2_back*ydim1_update_halo_kernel2_xvel_plus_2_back*(z))
//user function
__device__
inline void update_halo_kernel2_xvel_plus_2_back_gpu(double *xvel0, double *xvel1, const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0,0,0)] = xvel0[OPS_ACC0(0,0,2)];
if(fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0,0,0)] = xvel1[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_xvel_plus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel2_xvel_plus_2_back * ydim0_update_halo_kernel2_xvel_plus_2_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_xvel_plus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel2_xvel_plus_2_back * ydim1_update_halo_kernel2_xvel_plus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,32)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
OPS_kernels[32].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_back_h || ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_back_h || xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_back_h || ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_back_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_xvel_plus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_xvel_plus_2_back_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_xvel_plus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_xvel_plus_2_back_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_xvel_plus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_xvel_plus_2_back_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_xvel_plus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_xvel_plus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_xvel_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[32].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 32;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 32;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
e19b7a62fc13053e1207c84f04b0a60eb3c4126e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
//<--CUSTOMIZATION
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, top_data);
//caffe_gpu_round(count, top_data);
}
//CUSTOMIZATION-->
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| e19b7a62fc13053e1207c84f04b0a60eb3c4126e.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
//<--CUSTOMIZATION
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, top_data);
//caffe_gpu_round(count, top_data);
}
//CUSTOMIZATION-->
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
b4020bca3d30feb44b30237a8a04f9c7041bd528.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k)
{
long size[1] = { k };
long stride[1] = { 1 };
THCTensor_(rawResize)(state, self, 1, size, stride);
size_t len = k * sizeof(real);
THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n)
{
long size[2] = { m, n };
long stride[2] = { 1, m };
THCTensor_(rawResize)(state, self, 2, size, stride);
size_t len = m * n * sizeof(real);
THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice));
}
static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self)
{
THAssert(self->nDimension == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(real);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
THCudaCheck(hipMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, hipMemcpyDeviceToHost));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->nDimension == 2);
if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0])
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
long size[2] = { src->size[0], src->size[1] };
long stride[2] = { 1, src->size[0] };
THCTensor_(rawResize)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square");
THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible");
int n = a_->size[0];
int nrhs = b_->size[1];
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int *ipiv = th_magma_malloc_pinned<int>(n);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#else
magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#endif
if (info < 0)
THError("MAGMA gesv : Argument %d : illegal value", -info);
else if (info > 0)
THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gesv));
#endif
}
THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional");
THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b");
THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int m = a->size[0];
int n = a->size[1];
int nrhs = b->size[1];
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int n = a->size[0];
int lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
real *input_data = THCTensor_(data)(state, input);
// eigen values and workspace
real *w = th_magma_malloc_pinned<real>(n);
real *wA = th_magma_malloc_pinned<real>(lda);
// compute optimal size of work array
int info;
real lwork;
int liwork;
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif
real *work = th_magma_malloc_pinned<real>((size_t)lwork);
int *iwork = th_magma_malloc_pinned<int>(liwork);
// compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif
// copy eigen values from w to re_
if (info == 0)
THCTensor_(copyArray1d)(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, input, rv_);
#else
THError(NoMagma(syev));
#endif
}
THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int n = a_->size[0];
real *a_data = th_magma_malloc_pinned<real>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
real *wr = th_magma_malloc_pinned<real>(n);
real *wi = th_magma_malloc_pinned<real>(n);
real *vr_data = NULL;
int ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<real>(n * n);
ldvr = n;
}
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
{
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(real), hipMemcpyHostToDevice));
THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(real), hipMemcpyHostToDevice));
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
THCTensor *ra_ = THCTensor_(new)(state);
THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu);
THCTensor_(free)(state, ra_);
#else
THError(NoMagma(gesvd));
#endif
}
THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
magma_vec_t jobu = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;
magma_vec_t jobvt = jobu;
int m = a->size[0];
int n = a->size[1];
int k = m < n ? m : n;
int j = (jobu == MagmaAllVec) ? m : k;
real *a_data = th_magma_malloc_pinned<real>(m * n);
THCTensor_(copyTensor2d)(state, a_data, a);
real *rs_data = th_magma_malloc_pinned<real>(k);
real *ru_data = th_magma_malloc_pinned<real>(m * j);
real *rv_data = th_magma_malloc_pinned<real>(n * n);
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info);
#else
magma_dgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info);
#else
magma_dgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA gesvd : %d superdiagonals failed to converge", info);
else if (info < 0)
THError("MAGMA gesvd : Argument %d : illegal value", -info);
THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
THCTensor_(transpose)(state, rv_, NULL, 0, 1);
THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
THCTensor_(copyArray1d)(state, rs_, rs_data, k);
THCTensor_(copyArray2d)(state, ra_, a_data, m, n);
magma_free_pinned(work_data);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesvd2));
#endif
}
THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int info;
int n = a->size[0];
int lwork = n * magma_get_sgetri_nb(n);
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int *ipiv = th_magma_malloc_pinned<int>(n);
THCTensor *work = THCTensor_(newWithSize1d)(state, lwork);
real *work_data = THCTensor_(data)(state, work);
// Run LU
#if defined(THC_REAL_IS_FLOAT)
magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
#else
magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info);
#endif
if (info > 0)
THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#else
magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getri : Argument %d : illegal value", -info);
THCTensor_(free)(state, work);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
// input
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
// output
THCTensor *output = THCTensor_(newColumnMajor)(state, ra_, a);
size_t matrices_size = sizeof(real*);
real **matrices1 = (real **)THAlloc(matrices_size);
const real **matrices1_const = (const real **)THAlloc(matrices_size);
real **matrices2 = (real **)THAlloc(matrices_size);
matrices1[0] = THCTensor_(data)(state, input);
matrices1_const[0] = THCTensor_(data)(state, input);
matrices2[0] = THCTensor_(data)(state, output);
// Copy pointers to device.
real **d_matrices1, **d_matrices2;
const real **d_matrices1_const;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1_const, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size));
THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(hipMemcpyAsync(d_matrices1_const, matrices1_const, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
int info;
int *info_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int)));
int *ipiv_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int)));
// Run LU
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#else
THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#endif
THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#else
THCudaBlas_Dgetri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#endif
if (info > 0)
THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getri : Argument %d : illegal value", -info);
THCudaCheck(THCudaFree(state, ipiv_gpu));
THCudaCheck(THCudaFree(state, info_gpu));
THCTensor_(freeCopyTo)(state, output, input);
#endif
}
__global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
hipStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo[0] == 'U') {
hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
} else {
hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrf_gpu(ul, n, input_data, n, &info);
#else
magma_dpotrf_gpu(ul, n, input_data, n, &info);
#endif
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
if (uplo[0] == 'U') {
THCTensor_(triu)(state, ra_, input, 0);
} else {
THCTensor_(tril)(state, ra_, input, 0);
}
THCTensor_(free)(state, input);
#else
THError(NoMagma(potrf));
#endif
}
THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
int nrhs = b->size[1];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b);
real *b_data = THCTensor_(data)(state, b_);
THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a);
real *a_data = THCTensor_(data)(state, a_);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#else
magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#endif
// check error value
if (info < 0)
THError("MAGMA potrs : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, b_, rb_);
THCTensor_(free)(state, a_);
#else
THError(NoMagma(potrs));
#endif
}
THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_);
int m = a->size[0];
int n = a->size[1];
int k = (m < n ? m : n);
#ifdef MAGMA_V2
int nb = magma_get_sgeqrf_nb(m, n);
#else
int nb = magma_get_sgeqrf_nb(m);
#endif
real *a_data = THCTensor_(data)(state, a);
real *tau_data = th_magma_malloc_pinned<real>(n*n);
THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + ((n+31)/32)*32)*nb);
real *work_data = THCTensor_(data)(state, work);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#else
magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a);
real *q_data = THCTensor_(data)(state, q);
THCTensor_(narrow)(state, a, a, 0, 0, k);
THCTensor_(triu)(state, rr_, a, 0);
THCTensor_(free)(state, a);
#if defined(THC_REAL_IS_FLOAT)
magma_sorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info);
#else
magma_dorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info);
#endif
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCTensor_(free)(state, work);
magma_free_pinned(tau_data);
THCTensor_(narrow)(state, q, q, 1, 0, k);
THCTensor_(freeCopyTo)(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
#endif
#endif
| b4020bca3d30feb44b30237a8a04f9c7041bd528.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k)
{
long size[1] = { k };
long stride[1] = { 1 };
THCTensor_(rawResize)(state, self, 1, size, stride);
size_t len = k * sizeof(real);
THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n)
{
long size[2] = { m, n };
long stride[2] = { 1, m };
THCTensor_(rawResize)(state, self, 2, size, stride);
size_t len = m * n * sizeof(real);
THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self)
{
THAssert(self->nDimension == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(real);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
THCudaCheck(cudaMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, cudaMemcpyDeviceToHost));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->nDimension == 2);
if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0])
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
long size[2] = { src->size[0], src->size[1] };
long stride[2] = { 1, src->size[0] };
THCTensor_(rawResize)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square");
THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible");
int n = a_->size[0];
int nrhs = b_->size[1];
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int *ipiv = th_magma_malloc_pinned<int>(n);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#else
magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#endif
if (info < 0)
THError("MAGMA gesv : Argument %d : illegal value", -info);
else if (info > 0)
THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gesv));
#endif
}
THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional");
THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b");
THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int m = a->size[0];
int n = a->size[1];
int nrhs = b->size[1];
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int n = a->size[0];
int lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
real *input_data = THCTensor_(data)(state, input);
// eigen values and workspace
real *w = th_magma_malloc_pinned<real>(n);
real *wA = th_magma_malloc_pinned<real>(lda);
// compute optimal size of work array
int info;
real lwork;
int liwork;
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif
real *work = th_magma_malloc_pinned<real>((size_t)lwork);
int *iwork = th_magma_malloc_pinned<int>(liwork);
// compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif
// copy eigen values from w to re_
if (info == 0)
THCTensor_(copyArray1d)(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, input, rv_);
#else
THError(NoMagma(syev));
#endif
}
THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int n = a_->size[0];
real *a_data = th_magma_malloc_pinned<real>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
real *wr = th_magma_malloc_pinned<real>(n);
real *wi = th_magma_malloc_pinned<real>(n);
real *vr_data = NULL;
int ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<real>(n * n);
ldvr = n;
}
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
{
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(real), cudaMemcpyHostToDevice));
THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(real), cudaMemcpyHostToDevice));
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
THCTensor *ra_ = THCTensor_(new)(state);
THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu);
THCTensor_(free)(state, ra_);
#else
THError(NoMagma(gesvd));
#endif
}
THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
magma_vec_t jobu = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;
magma_vec_t jobvt = jobu;
int m = a->size[0];
int n = a->size[1];
int k = m < n ? m : n;
int j = (jobu == MagmaAllVec) ? m : k;
real *a_data = th_magma_malloc_pinned<real>(m * n);
THCTensor_(copyTensor2d)(state, a_data, a);
real *rs_data = th_magma_malloc_pinned<real>(k);
real *ru_data = th_magma_malloc_pinned<real>(m * j);
real *rv_data = th_magma_malloc_pinned<real>(n * n);
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info);
#else
magma_dgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info);
#else
magma_dgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA gesvd : %d superdiagonals failed to converge", info);
else if (info < 0)
THError("MAGMA gesvd : Argument %d : illegal value", -info);
THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
THCTensor_(transpose)(state, rv_, NULL, 0, 1);
THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
THCTensor_(copyArray1d)(state, rs_, rs_data, k);
THCTensor_(copyArray2d)(state, ra_, a_data, m, n);
magma_free_pinned(work_data);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesvd2));
#endif
}
THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int info;
int n = a->size[0];
int lwork = n * magma_get_sgetri_nb(n);
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int *ipiv = th_magma_malloc_pinned<int>(n);
THCTensor *work = THCTensor_(newWithSize1d)(state, lwork);
real *work_data = THCTensor_(data)(state, work);
// Run LU
#if defined(THC_REAL_IS_FLOAT)
magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
#else
magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info);
#endif
if (info > 0)
THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#else
magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getri : Argument %d : illegal value", -info);
THCTensor_(free)(state, work);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
// input
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
// output
THCTensor *output = THCTensor_(newColumnMajor)(state, ra_, a);
size_t matrices_size = sizeof(real*);
real **matrices1 = (real **)THAlloc(matrices_size);
const real **matrices1_const = (const real **)THAlloc(matrices_size);
real **matrices2 = (real **)THAlloc(matrices_size);
matrices1[0] = THCTensor_(data)(state, input);
matrices1_const[0] = THCTensor_(data)(state, input);
matrices2[0] = THCTensor_(data)(state, output);
// Copy pointers to device.
real **d_matrices1, **d_matrices2;
const real **d_matrices1_const;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1_const, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size));
THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_matrices1_const, matrices1_const, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
int info;
int *info_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int)));
int *ipiv_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int)));
// Run LU
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#else
THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#endif
THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#else
THCudaBlas_Dgetri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#endif
if (info > 0)
THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getri : Argument %d : illegal value", -info);
THCudaCheck(THCudaFree(state, ipiv_gpu));
THCudaCheck(THCudaFree(state, info_gpu));
THCTensor_(freeCopyTo)(state, output, input);
#endif
}
__global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
cudaStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo[0] == 'U') {
THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
} else {
THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrf_gpu(ul, n, input_data, n, &info);
#else
magma_dpotrf_gpu(ul, n, input_data, n, &info);
#endif
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
if (uplo[0] == 'U') {
THCTensor_(triu)(state, ra_, input, 0);
} else {
THCTensor_(tril)(state, ra_, input, 0);
}
THCTensor_(free)(state, input);
#else
THError(NoMagma(potrf));
#endif
}
THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
int nrhs = b->size[1];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b);
real *b_data = THCTensor_(data)(state, b_);
THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a);
real *a_data = THCTensor_(data)(state, a_);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#else
magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#endif
// check error value
if (info < 0)
THError("MAGMA potrs : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, b_, rb_);
THCTensor_(free)(state, a_);
#else
THError(NoMagma(potrs));
#endif
}
THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_);
int m = a->size[0];
int n = a->size[1];
int k = (m < n ? m : n);
#ifdef MAGMA_V2
int nb = magma_get_sgeqrf_nb(m, n);
#else
int nb = magma_get_sgeqrf_nb(m);
#endif
real *a_data = THCTensor_(data)(state, a);
real *tau_data = th_magma_malloc_pinned<real>(n*n);
THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + ((n+31)/32)*32)*nb);
real *work_data = THCTensor_(data)(state, work);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#else
magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a);
real *q_data = THCTensor_(data)(state, q);
THCTensor_(narrow)(state, a, a, 0, 0, k);
THCTensor_(triu)(state, rr_, a, 0);
THCTensor_(free)(state, a);
#if defined(THC_REAL_IS_FLOAT)
magma_sorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info);
#else
magma_dorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info);
#endif
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCTensor_(free)(state, work);
magma_free_pinned(tau_data);
THCTensor_(narrow)(state, q, q, 1, 0, k);
THCTensor_(freeCopyTo)(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
#endif
#endif
|
4d507d69c3cb90d65eb136130cd922376409a8c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
}
| 4d507d69c3cb90d65eb136130cd922376409a8c2.cu | #include <stdio.h>
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
}
|
f0c3f732f99446195ede43da45c4c6e874aca5c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<float16> { \
inline __host__ __device__ float16 \
operator()(const float16& lhs, const float16& rhs) const { \
return convert::To<float, float16>(convert::To<float16, float>( \
lhs) expr convert::To<float16, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
// Computes sine and cosine of each of the N inputs in a single pass:
// S[i] = sin(X[i]), C[i] = cos(X[i]).
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    // __ldg routes the load through the read-only data cache (SM35+).
    sincos(__ldg(X + i), S + i, C + i);
#else
    sincos(X[i], S + i, C + i);
#endif
  }
}
// Elementwise C[i] = op(A[i], B[i]) over N elements; no broadcasting.
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
    const int N,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    C[i] = op(A[i], B[i]);
  }
}
// Binary op where one operand is a single row (length `cols`) broadcast
// against every row of the other operand. When broadcast_1st is true, A is
// the broadcast row; otherwise B is. `size` is rows * cols.
// (NOTE: "Kenel" is a long-standing typo; kept because other code in this
// file references the name.)
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
    const int size,
    const FixedDivisor<int> cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    // Column index, computed with a fast fixed-divisor modulo.
    const int j = cols.Mod(C_index);
    const int A_index = broadcast_1st ? j : C_index;
    const int B_index = broadcast_1st ? C_index : j;
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Binary op where one operand is a single column (one value per row)
// broadcast across all columns of the other operand. When broadcast_1st is
// true, A is the broadcast column; otherwise B is. `size` is rows * cols.
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
    const int size,
    const FixedDivisor<int> cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    // Row index, computed with a fast fixed-divisor division.
    const int i = cols.Div(C_index);
    const int A_index = broadcast_1st ? i : C_index;
    const int B_index = broadcast_1st ? C_index : i;
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Generic rank-D broadcast binary op. A_strides/B_strides carry a stride of
// 0 for broadcast (size-1) dimensions (set up in BroadcastBinaryOpImpl), and
// C_dims carries fixed divisors used to decompose a flat output index into
// per-dimension coordinates.
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
    const int size,
    const SimpleArray<int, D> A_strides,
    const SimpleArray<int, D> B_strides,
    const SimpleArray<FixedDivisor<int>, D> C_dims,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    int A_index = 0;
    int B_index = 0;
    int C_index_val = C_index;
#pragma unroll
    // Walk dimensions innermost-first, peeling one coordinate per step.
    for (int i = D - 1; i >= 0; --i) {
      int d;
      C_dims.data[i].DivMod(C_index_val, &C_index_val, &d);
      A_index += d * A_strides.data[i];
      B_index += d * B_strides.data[i];
    }
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Launches the 2D-broadcast kernel matching (rowwise_broadcast,
// broadcast_1st) for a row-major (rows x cols) output. No-op when either
// extent is zero. broadcast_1st selects whether A (true) or B (false) is the
// broadcast operand.
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
    const int rows,
    const int cols,
    const bool rowwise_broadcast,
    const bool broadcast_1st,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  // Empty output: nothing to launch.
  if (rows == 0 || cols == 0) {
    return;
  }
  const int size = rows * cols;
  const FixedDivisor<int> cols_divisor(cols);
  // Guard-clause dispatch: each case launches a distinct template
  // instantiation, so the four launches cannot be merged.
  if (rowwise_broadcast && broadcast_1st) {
    hipLaunchKernelGGL(
        (RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>),
        dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(),
        size,
        cols_divisor,
        op,
        A,
        B,
        C);
    return;
  }
  if (rowwise_broadcast) {
    hipLaunchKernelGGL(
        (RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>),
        dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(),
        size,
        cols_divisor,
        op,
        A,
        B,
        C);
    return;
  }
  if (broadcast_1st) {
    hipLaunchKernelGGL(
        (ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>),
        dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(),
        size,
        cols_divisor,
        op,
        A,
        B,
        C);
    return;
  }
  hipLaunchKernelGGL(
      (ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>),
      dim3(CAFFE_GET_BLOCKS(size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(),
      size,
      cols_divisor,
      op,
      A,
      B,
      C);
}
// Rank-D implementation of BroadcastBinaryOp: builds stride arrays (stride 0
// for broadcast dimensions) and per-dimension fixed divisors, then launches
// the generic broadcast kernel. All three dims arrays have length D.
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
    const int* A_dims,
    const int* B_dims,
    const int* C_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  SimpleArray<int, D> A_strides_array;
  SimpleArray<int, D> B_strides_array;
  SimpleArray<FixedDivisor<int>, D> C_dims_array;
  int A_stride = 1;
  int B_stride = 1;
  for (int i = D - 1; i >= 0; --i) {
    if (C_dims[i] == 0) {
      // Empty output tensor: nothing to compute.
      return;
    }
    // A size-1 dimension is broadcast, i.e. contributes stride 0.
    A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
    B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
    A_stride *= A_dims[i];
    B_stride *= B_dims[i];
    C_dims_array.data[i] = FixedDivisor<int>(C_dims[i]);
  }
  const int size =
      std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
  hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>)
      , dim3(CAFFE_GET_BLOCKS(size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(),
      size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
// Dispatches a broadcast binary op C = op(A, B) where A and B may have
// different ranks/shapes (NumPy-style broadcasting). Fast paths, in order:
//   1. identical padded shapes      -> plain elementwise kernel
//   2. rowwise/colwise 2D pattern   -> specialized 2D kernels
//   3. anything else                -> rank-dispatched generic kernel
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
    const int A_ndim,
    const int* A_dims,
    const int B_ndim,
    const int* B_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  // std::max instead of the unqualified ::max the hipify pass emitted; ::max
  // relies on a non-standard global declaration and is not portable.
  const int ndim = std::max(A_ndim, B_ndim);
  std::vector<int> A_dims_array(ndim);
  std::vector<int> B_dims_array(ndim);
  std::vector<int> C_dims_array(ndim);
  // Left-pads the shorter shape with 1s and computes the broadcast output
  // dims into C_dims_array.
  utils::ComputeBroadcastBinaryOpDims(
      A_ndim,
      A_dims,
      B_ndim,
      B_dims,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data());
  if (A_dims_array == B_dims_array) {
    // Same shape after padding: no broadcasting needed.
    const int size = std::accumulate(
        C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
    hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>)
        , dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(), size, op, A, B, C);
    return;
  }
  int rows;
  int cols;
  bool broadcast_1st;
  // 2D fast path: one operand reduces to a broadcast row.
  if (utils::IsRowwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, true, broadcast_1st, op, A, B, C, context);
    return;
  }
  // 2D fast path: one operand reduces to a broadcast column.
  if (utils::IsColwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, false, broadcast_1st, op, A, B, C, context);
    return;
  }
  // General case: dispatch on runtime rank to a compile-time-D kernel.
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
      ndim,
      BroadcastBinaryOpImpl,
      TIn,
      TOut,
      BinaryOperator,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data(),
      op,
      A,
      B,
      C,
      context);
}
} // namespace
// Generates a unary CUDA kernel plus the host-side Func<T, CUDAContext>
// specialization that launches it: Y[i] = op(X[i]) for i in [0, N).
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op)                  \
  __global__ void Func##CUDAKernel(const int N, const T* X, T* Y) {       \
    CUDA_1D_KERNEL_LOOP(i, N) {                                           \
      Y[i] = op(X[i]);                                                    \
    }                                                                     \
  }                                                                       \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                           \
      const int N, const T* x, T* y, CUDAContext* context) {              \
    hipLaunchKernelGGL(( Func##CUDAKernel),                               \
        CAFFE_GET_BLOCKS(N),                                              \
        CAFFE_CUDA_NUM_THREADS,                                           \
        0,                                                                \
        context->cuda_stream(), N, x, y);                                 \
  }
// Transcendental / elementary float functions.
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
// Cube for all numeric types.
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Cube,
    utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Cube,
    utils::Cube<std::int64_t>)
// Logical negation for bool, arithmetic negation for numeric types.
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Neg,
    utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Neg,
    utils::Negate<std::int64_t>)
// Sign and reciprocal.
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Sign,
    utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Sign,
    utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
// Generates the host-side SinCos<T, CUDAContext> specialization that fills
// ys with sin(x) and yc with cos(x) in one kernel launch.
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T)                            \
  template <>                                                        \
  CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>(                    \
      const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
    hipLaunchKernelGGL(( SinCosCUDAKernel),                          \
        CAFFE_GET_BLOCKS(N),                                         \
        CAFFE_CUDA_NUM_THREADS,                                      \
        0,                                                           \
        context->cuda_stream(), N, x, ys, yc);                       \
  }
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
// Generates the host-side Func<TIn, CUDAContext> specialization for a plain
// elementwise binary op (no broadcasting): C[i] = Op(A[i], B[i]).
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)      \
  template <>                                                          \
  CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>(                      \
      const int N,                                                     \
      const TIn* A,                                                    \
      const TIn* B,                                                    \
      TOut* C,                                                         \
      CUDAContext* context) {                                          \
    hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>>) \
        , dim3(CAFFE_GET_BLOCKS(N)),                                   \
        CAFFE_CUDA_NUM_THREADS,                                        \
        0,                                                             \
        context->cuda_stream(), N, Op<TIn>(), A, B, C);                \
  }
// Comparisons return bool regardless of input type.
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
// Arithmetic ops keep the input type (including float16).
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op)                        \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op)              \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op)            \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
// Boolean logic ops.
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
// Bitwise ops for bool and integer types.
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op)                 \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)                 \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
    float,
    float,
    ElemwiseMax,
    thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
// Generates the four 2D-broadcast specializations of a binary function:
// Rowwise##Func / Colwise##Func, each with broadcast_1st = true and false.
// All operate on a row-major (rows x cols) output and return immediately
// when either extent is zero.
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)       \
  template <>                                                                 \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>(              \
      const int rows,                                                         \
      const int cols,                                                         \
      const TIn* A,                                                           \
      const TIn* B,                                                           \
      TOut* C,                                                                \
      CUDAContext* context) {                                                 \
    if (rows == 0 || cols == 0) {                                             \
      return;                                                                 \
    }                                                                         \
    const int size = rows * cols;                                             \
    const FixedDivisor<int> cols_div(cols);                                   \
    hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>)  \
        , dim3(CAFFE_GET_BLOCKS(size)),                                       \
        CAFFE_CUDA_NUM_THREADS,                                               \
        0,                                                                    \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);          \
  }                                                                           \
  template <>                                                                 \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>(             \
      const int rows,                                                         \
      const int cols,                                                         \
      const TIn* A,                                                           \
      const TIn* B,                                                           \
      TOut* C,                                                                \
      CUDAContext* context) {                                                 \
    if (rows == 0 || cols == 0) {                                             \
      return;                                                                 \
    }                                                                         \
    const int size = rows * cols;                                             \
    const FixedDivisor<int> cols_div(cols);                                   \
    hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
        , dim3(CAFFE_GET_BLOCKS(size)),                                       \
        CAFFE_CUDA_NUM_THREADS,                                               \
        0,                                                                    \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);          \
  }                                                                           \
  template <>                                                                 \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>(              \
      const int rows,                                                         \
      const int cols,                                                         \
      const TIn* A,                                                           \
      const TIn* B,                                                           \
      TOut* C,                                                                \
      CUDAContext* context) {                                                 \
    if (rows == 0 || cols == 0) {                                             \
      return;                                                                 \
    }                                                                         \
    const int size = rows * cols;                                             \
    const FixedDivisor<int> cols_div(cols);                                   \
    hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>)  \
        , dim3(CAFFE_GET_BLOCKS(size)),                                       \
        CAFFE_CUDA_NUM_THREADS,                                               \
        0,                                                                    \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);          \
  }                                                                           \
  template <>                                                                 \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>(             \
      const int rows,                                                         \
      const int cols,                                                         \
      const TIn* A,                                                           \
      const TIn* B,                                                           \
      TOut* C,                                                                \
      CUDAContext* context) {                                                 \
    if (rows == 0 || cols == 0) {                                             \
      return;                                                                 \
    }                                                                         \
    const int size = rows * cols;                                             \
    const FixedDivisor<int> cols_div(cols);                                   \
    hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
        , dim3(CAFFE_GET_BLOCKS(size)),                                       \
        CAFFE_CUDA_NUM_THREADS,                                               \
        0,                                                                    \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);          \
  }
// Comparisons return bool regardless of input type.
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
// Arithmetic ops keep the input type (including float16).
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)             \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                          \
      std::int32_t, std::int32_t, Func, Op)                            \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                          \
      std::int64_t, std::int64_t, Func, Op)                            \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op)   \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
// Boolean logic ops.
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
// Bitwise ops for bool and integer types.
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int32_t, std::int32_t, Func, Op)                        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
// Generates the general N-dimensional broadcasting Func<TIn, CUDAContext>
// specialization, delegating to BroadcastBinaryOp above.
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)  \
  template <>                                                         \
  CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>(                     \
      const int A_ndim,                                               \
      const int* A_dims,                                              \
      const int B_ndim,                                               \
      const int* B_dims,                                              \
      const TIn* A,                                                   \
      const TIn* B,                                                   \
      TOut* C,                                                        \
      CUDAContext* context) {                                         \
    BroadcastBinaryOp<TIn, TOut, Op<TIn>>(                            \
        A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
  }
// Comparisons return bool regardless of input type.
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
// Arithmetic ops keep the input type (including float16).
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)             \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                          \
      std::int32_t, std::int32_t, Func, Op)                         \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                          \
      std::int64_t, std::int64_t, Func, Op)                         \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op)   \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
// Boolean logic ops.
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
// Bitwise ops for bool and integer types.
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int32_t, std::int32_t, Func, Op)                     \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
// Generates a full-tensor reduction (e.g. ReduceMin/ReduceMax) using CUB's
// two-pass protocol: the first DeviceReduce call with a null workspace only
// queries the required temp-storage size, the scratch Tensor is resized to
// hold it (rounded up to whole elements of T), and the second call performs
// the actual reduction into dst.
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func)                  \
  template <>                                                           \
  CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>(                     \
      const int N,                                                      \
      const T* src,                                                     \
      T* dst,                                                           \
      Tensor* scratch_ptr,                                              \
      CUDAContext* context) {                                           \
    size_t memRequired = 0;                                             \
    hipcub::DeviceReduce::func(                                         \
        nullptr, memRequired, src, dst, N, context->cuda_stream());     \
    auto buffer_size =                                                  \
        static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
    scratch_ptr->Resize(std::vector<TIndex>{buffer_size});              \
    hipcub::DeviceReduce::func(                                         \
        static_cast<void*>(scratch_ptr->mutable_data<T>()),             \
        memRequired,                                                    \
        src,                                                            \
        dst,                                                            \
        N,                                                              \
        context->cuda_stream());                                        \
  }
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
// FP32 GEMM: C = alpha * op(A) * op(B) + beta * C with row-major A (M x K),
// B (K x N) and C (M x N). Because BLAS is column-major, the call computes
// C^T = op(B)^T * op(A)^T by swapping the operands and the M/N extents.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  // alpha/beta are passed by host pointer, so pointer mode must be HOST.
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemm(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      N));
}
// FP16 GEMM with selectable accumulation precision: math_type FLOAT uses
// FP16 storage with FP32 compute; math_type FLOAT16 uses pure FP16 compute.
// Same row-major -> column-major operand swap as the FP32 overload.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const float16* B,
    const float beta,
    float16* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    // NOTE(review): cublasSgemmEx was left un-hipified by the conversion
    // tool; confirm it maps to the hipBLAS equivalent on ROCm builds.
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        HIP_R_16F,
        ldb,
        A,
        HIP_R_16F,
        lda,
        &beta,
        C,
        HIP_R_16F,
        N));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // convert alpha, beta from float -> __half
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    // call hipblasHgemm
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemm(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha_fp16,
        (const __half*)B,
        ldb,
        (const __half*)A,
        lda,
        &beta_fp16,
        (__half*)C,
        N));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
// Adds a per-channel bias to a CHW image as a rank-1 GEMM update:
// image += bias (bias_channels x 1) * bias_multiplier (1 x image_size).
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
    const float* bias,
    const float* bias_multiplier,
    const int bias_channels,
    const int image_size,
    float* image,
    CUDAContext* context) {
  const float kAlpha = 1.0f; // scale of the bias outer product
  const float kBeta = 1.0f;  // keep the existing image contents
  Gemm<float, CUDAContext>(
      CblasNoTrans,
      CblasNoTrans,
      bias_channels,
      image_size,
      1,
      kAlpha,
      bias,
      bias_multiplier,
      kBeta,
      image,
      context);
}
// Batched FP32 GEMM over `batch_size` independent problems. A/B/C are host
// arrays of device pointers, one per batch entry. Pre-CUDA-8 toolchains fall
// back to a per-matrix loop; otherwise the pointer arrays are staged on the
// device and a single batched BLAS call is issued.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float** A,
    const float** B,
    const float beta,
    float** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float, CUDAContext>(
        trans_A,
        trans_B,
        M,
        N,
        K,
        alpha,
        A[i],
        B[i],
        beta,
        C[i],
        context,
        math_type);
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  // Copy the host-side pointer arrays to device memory, as the batched API
  // requires device-resident pointer arrays.
  thrust::device_vector<const float*> A_device(A, A + batch_size);
  thrust::device_vector<const float*> B_device(B, B + batch_size);
  thrust::device_vector<float*> C_device(C, C + batch_size);
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemmBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B_device.data().get(),
      ldb,
      A_device.data().get(),
      lda,
      &beta,
      C_device.data().get(),
      ldc,
      batch_size));
#endif
}
// Strided-batched FP32 GEMM: batch entry i uses A + i * A_stride,
// B + i * B_stride and C + i * C_stride (single contiguous allocations, no
// pointer arrays). Pre-CUDA-8 toolchains fall back to a per-matrix loop.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float, CUDAContext>(
        trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
    A += A_stride;
    B += B_stride;
    C += C_stride;
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      B_stride,
      A,
      lda,
      A_stride,
      &beta,
      C,
      ldc,
      C_stride,
      batch_size));
#endif
}
// Batched FP16 GEMM. A/B/C are host arrays of device pointers. Paths:
//   - pre-CUDA-9 compiler: per-matrix loop,
//   - math_type FLOAT: GemmBatchedEx with FP16 storage / FP32 compute
//     (requires CUDA/toolkit >= 9.1; older falls back to a loop),
//   - math_type FLOAT16: pure-FP16 HgemmBatched.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16** A,
    const float16** B,
    const float beta,
    float16** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 9
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float16, CUDAContext>(
        trans_A,
        trans_B,
        M,
        N,
        K,
        alpha,
        A[i],
        B[i],
        beta,
        C[i],
        context,
        math_type);
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010
    // loop over matrices in the batch
    for (int i = 0; i < batch_size; ++i) {
      Gemm<float16, CUDAContext>(
          trans_A,
          trans_B,
          M,
          N,
          K,
          alpha,
          A[i],
          B[i],
          beta,
          C[i],
          context,
          math_type);
    }
#else
    // Stage the pointer arrays on the device, as the batched API requires.
    thrust::device_vector<const void*> A_device(A, A + batch_size);
    thrust::device_vector<const void*> B_device(B, B + batch_size);
    thrust::device_vector<void*> C_device(C, C + batch_size);
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    // FP16 inputs/outputs (HIP_R_16F) with FP32 accumulation (HIP_R_32F).
    CUBLAS_ENFORCE(hipblasGemmBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B_device.data().get(),
        HIP_R_16F,
        ldb,
        A_device.data().get(),
        HIP_R_16F,
        lda,
        &beta,
        C_device.data().get(),
        HIP_R_16F,
        ldc,
        batch_size,
        HIP_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    // Reinterpret the float16 pointer arrays as __half for the BLAS call.
    std::vector<const __half*> A_array(batch_size);
    std::vector<const __half*> B_array(batch_size);
    std::vector<__half*> C_array(batch_size);
    for (int i = 0; i < batch_size; ++i) {
      A_array[i] = reinterpret_cast<const __half*>(A[i]);
      B_array[i] = reinterpret_cast<const __half*>(B[i]);
      C_array[i] = reinterpret_cast<__half*>(C[i]);
    }
    thrust::device_vector<const __half*> A_device(
        A_array.cbegin(), A_array.cend());
    thrust::device_vector<const __half*> B_device(
        B_array.cbegin(), B_array.cend());
    thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemmBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha_fp16,
        B_device.data().get(),
        ldb,
        A_device.data().get(),
        lda,
        &beta_fp16,
        C_device.data().get(),
        ldc,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
#endif
}
// Strided-batched FP16 GEMM: batch entry i uses A + i * A_stride, etc.
// Paths mirror GemmBatched<float16>: pre-CUDA-8 loop fallback; math_type
// FLOAT -> GemmStridedBatchedEx with FP32 accumulation (toolkit >= 9.1,
// otherwise loop); math_type FLOAT16 -> pure-FP16 HgemmStridedBatched.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const int A_stride,
    const float16* B,
    const int B_stride,
    const float beta,
    float16* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float16, CUDAContext>(
        trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
    A += A_stride;
    B += B_stride;
    C += C_stride;
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010
    // loop over matrices in the batch
    for (int i = 0; i < batch_size; ++i) {
      Gemm<float16, CUDAContext>(
          trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
      A += A_stride;
      B += B_stride;
      C += C_stride;
    }
#else
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    // FP16 inputs/outputs (HIP_R_16F) with FP32 accumulation (HIP_R_32F).
    CUBLAS_ENFORCE(hipblasGemmStridedBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        HIP_R_16F,
        ldb,
        B_stride,
        A,
        HIP_R_16F,
        lda,
        A_stride,
        &beta,
        C,
        HIP_R_16F,
        ldc,
        C_stride,
        batch_size,
        HIP_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha_fp16,
        (const __half*)B,
        ldb,
        B_stride,
        (const __half*)A,
        lda,
        A_stride,
        &beta_fp16,
        (__half*)C,
        ldc,
        C_stride,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
#endif
}
#if TORCH_HIP_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
// TensorCore engine for FP32 GEMM: no TensorCore-specific code path exists
// here, so forward every argument unchanged to the default implementation.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  Gemm<float, CUDAContext>(
      trans_A,
      trans_B,
      M,
      N,
      K,
      alpha,
      A,
      B,
      beta,
      C,
      context,
      math_type);
}
// TensorCore engine for FP16 GEMM: temporarily enables TensorCore math on
// the shared cuBLAS handle, issues a GemmEx call with FP16 storage / FP32
// accumulation, then restores the default math mode so other users of the
// handle are unaffected.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float16, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const float16* B,
    const float beta,
    float16* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  // enable TensorCore for this call on this handle
  // NOTE(review): cublasSetMathMode and CUBLAS_GEMM_DFALT_TENSOR_OP below
  // were left un-hipified by the conversion tool; confirm their hipBLAS
  // mappings on ROCm builds.
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
  }
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasGemmEx(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      HIP_R_16F,
      ldb,
      A,
      HIP_R_16F,
      lda,
      &beta,
      C,
      HIP_R_16F,
      N,
      HIP_R_32F,
      CUBLAS_GEMM_DFALT_TENSOR_OP));
  // Now disable TensorCore math for subsequent calls to this handle
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
  }
}
// TensorCore-engine specialization for fp32 strided-batched GEMM.
// No TensorCore-specific work here; forwards all arguments to the
// DefaultEngine implementation unchanged.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  return GemmStridedBatched<float, CUDAContext, DefaultEngine>(
      trans_A,
      trans_B,
      batch_size,
      M,
      N,
      K,
      alpha,
      A,
      A_stride,
      B,
      B_stride,
      beta,
      C,
      C_stride,
      context,
      math_type);
}
// TensorCore-engine specialization for fp16 strided-batched GEMM.
// Forwards all arguments to the DefaultEngine implementation unchanged.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float16, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const int A_stride,
    const float16* B,
    const int B_stride,
    const float beta,
    float16* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  return GemmStridedBatched<float16, CUDAContext, DefaultEngine>(
      trans_A,
      trans_B,
      batch_size,
      M,
      N,
      K,
      alpha,
      A,
      A_stride,
      B,
      B_stride,
      beta,
      C,
      C_stride,
      context,
      math_type);
}
#endif // TORCH_HIP_VERSION >= 9000
// fp32 GEMM with caller-supplied leading dimensions (lda/ldb/ldc).
// alpha/beta are host scalars (pointer mode HOST).
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int lda,
    const float* B,
    const int ldb,
    const float beta,
    float* C,
    const int ldc,
    CUDAContext* context) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: B and A are swapped and M/N exchanged so the
  // column-major result equals our row-major C.
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemm(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      ldc));
}
// fp32 y = alpha * op(A) * x + beta * y for a row-major M x N matrix A.
// Because cublas is column-major, the transpose flag is inverted and the
// dimensions passed as (N, M) with leading dimension N.
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float* A,
    const float* x,
    const float beta,
    float* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemv(
      context->cublas_handle(),
      cu_trans_A,
      N,
      M,
      &alpha,
      A,
      N,
      x,
      1,
      &beta,
      y,
      1));
}
// Batched Add variants
namespace {
// Accumulates `batch` strided slices of `first` into Y:
//   Y[i] += sum_j first[j * stripe + i]  for i in [0, N).
// Accumulation is done in float regardless of T (via convert::To), so
// float16 inputs are summed at fp32 precision.
template <typename T>
__global__ void AddStripedBatchKernel(
    const int N,
    const T* first,
    T* Y,
    const int stripe,
    const int batch) {
  for (int j = 0; j < batch; j++) {
    const T* x = first + j * stripe;
    CUDA_1D_KERNEL_LOOP(i, N) {
      float tmpY = convert::To<T, float>(Y[i]);
      tmpY += convert::To<T, float>(x[i]);
      Y[i] = convert::To<float, T>(tmpY);
    }
  }
}
} // namespace
// Host wrappers launching AddStripedBatchKernel<T> on the context's stream,
// instantiated for float and float16 below.
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T)  \
  template <>                                         \
  CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>(            \
      const int N,                                    \
      const T* first,                                 \
      T* Y,                                           \
      const int stripe,                               \
      const int batch,                                \
      CUDAContext* context) {                         \
    hipLaunchKernelGGL(( AddStripedBatchKernel<T>)                          \
       , dim3(CAFFE_GET_BLOCKS(N)),                      \
           CAFFE_CUDA_NUM_THREADS,                    \
           0,                                         \
           context->cuda_stream(), N, first, Y, stripe, batch);       \
  }
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
// fp16 GEMV implemented as an Mx1 (or Nx1) GEMM, since there is no fp16
// gemv entry point. math_type selects fp32 compute (cublasSgemmEx) or pure
// fp16 compute (hipblasHgemm); anything else throws.
template <>
CAFFE2_CUDA_EXPORT void Gemv<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float16* A,
    const float16* x,
    const float beta,
    float16* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Row-major A viewed column-major: invert the transpose flag.
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  // sort out what we need to call cublasSgemmEx / hipblasHgemm:
  // m = output length, k = reduction length, for the (m x k) * (k x 1) GEMM.
  const int m = (cu_trans_A == HIPBLAS_OP_N) ? N : M;
  const int k = (cu_trans_A == HIPBLAS_OP_N) ? M : N;
  const int lda = (cu_trans_A == HIPBLAS_OP_N) ? m : k;
  const int ldc = m;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    // fp16 storage, fp32 accumulate; alpha/beta stay host floats.
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_A,
        HIPBLAS_OP_N,
        m,
        1,
        k,
        &alpha,
        A,
        HIP_R_16F,
        lda,
        x,
        HIP_R_16F,
        k,
        &beta,
        y,
        HIP_R_16F,
        ldc));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Pure fp16 path: scalars must be converted to __half first.
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemm(
        context->cublas_handle(),
        cu_trans_A,
        HIPBLAS_OP_N,
        m,
        1,
        k,
        &alpha_fp16,
        (const __half*)A,
        lda,
        (const __half*)x,
        k,
        &beta_fp16,
        (__half*)y,
        ldc));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
namespace {
// Fills Y[0..N) with the scalar `alpha` using a grid-stride loop.
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = alpha;
  }
}
} // namespace
// Host Set<T>: fast-path alpha == 0 via hipMemsetAsync (byte-wise zero is
// valid for all instantiated types), otherwise launches SetKernel<T>.
#define CAFFE2_SPECIALIZED_CUDA_SET(T)                             \
  template <>                                                      \
  CAFFE2_CUDA_API void Set<T, CUDAContext>(                        \
      const size_t N, const T alpha, T* Y, CUDAContext* context) { \
    if (N == 0) {                                                  \
      return;                                                      \
    }                                                              \
    if (alpha == T(0)) {                                           \
      hipMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
    } else {                                                       \
      hipLaunchKernelGGL(( SetKernel<T>)                                                 \
         , dim3(CAFFE_GET_BLOCKS(N)),                                 \
             CAFFE_CUDA_NUM_THREADS,                               \
             0,                                                    \
             context->cuda_stream(), N, alpha, Y);                                 \
    }                                                              \
  }
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
// float16 Set: no memset fast path here (a float16 zero pattern is not
// assumed); always fills through SetKernel when there is work to do.
template <>
CAFFE2_CUDA_EXPORT void Set<float16, CUDAContext>(
    const size_t N,
    const float16 alpha,
    float16* Y,
    CUDAContext* context) {
  // Empty buffer: nothing to fill.
  if (N == 0) {
    return;
  }
  hipLaunchKernelGGL(( SetKernel<float16>)
      , dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), N, alpha, Y);
}
namespace {
// Rescales samples in x from [0, 1) to [min, max) in place
// (computation done in float, stored back as T).
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
  float scale = max - min;
  CUDA_1D_KERNEL_LOOP(i, N) {
    x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
  }
}
// Maps raw unsigned random words into the inclusive integer range
// [min, max] in place, reinterpreting the buffer as signed ints.
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
  int* x_int = reinterpret_cast<int*>(x);
  int range = (max - min + 1);
  CUDA_1D_KERNEL_LOOP(i, N) {
    x_int[i] = min + static_cast<int>(x[i] % range);
  }
}
} // namespace
// Fills r with n floats uniform in [min, max): curand generates [0, 1),
// then UniformShift rescales in place.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
    const size_t n,
    const float min,
    const float max,
    float* r,
    CUDAContext* context) {
  CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
  hipLaunchKernelGGL(( UniformShift<float>)
      , dim3(CAFFE_GET_BLOCKS(n)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), n, min, max, r);
}
// Double-precision variant: generate uniform doubles, then rescale.
// Note UniformShift's min/max/scale math is carried out in float.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
    const size_t n,
    const double min,
    const double max,
    double* r,
    CUDAContext* context) {
  CURAND_ENFORCE(
      hiprandGenerateUniformDouble(context->curand_generator(), r, n));
  hipLaunchKernelGGL(( UniformShift<double>)
      , dim3(CAFFE_GET_BLOCKS(n)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), n, min, max, r);
}
// Integer variant: generate raw 32-bit words into r, then map them to the
// inclusive range [min, max] in place with UniformIntFit.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
    const size_t n,
    const int min,
    const int max,
    int* r,
    CUDAContext* context) {
  CURAND_ENFORCE(hiprandGenerate(
      context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
  hipLaunchKernelGGL(( UniformIntFit),
      dim3(CAFFE_GET_BLOCKS(n)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(),
      n, min, max, reinterpret_cast<unsigned int*>(r));
}
// hiprandGenerateNormal* requires an even sample count. For odd n this
// fills the last element with a host-generated Gaussian sample and returns
// n - 1 (the even count left for the device generator); even n is returned
// unchanged.
template <typename T>
size_t HandleOddLengthRandGaussian(
    const size_t n,
    const T mean,
    const T std,
    T* r,
    CUDAContext* context) {
  if (n % 2 == 0) {
    return n;
  }
  // NOTE(review): the engine is default-constructed on every call, so the
  // tail sample is identical across calls — confirm this is intended.
  std::default_random_engine engine;
  std::normal_distribution<T> dist(mean, std);
  const T tail_sample = dist(engine);
  Set<T, CUDAContext>(1, tail_sample, r + (n - 1), context);
  return n - 1;
}
// Fills r with n samples from N(mean, std).
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
    const size_t n,
    const float mean,
    const float std,
    float* r,
    CUDAContext* context) {
  // If n is odd, we add a random Gaussian value at the end manually
  // and generate n-1 random values using hiprandGenerateNormal.
  // hiprandGenerateNormal requires n to be even.
  const size_t even_n =
      HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
  CURAND_ENFORCE(
      hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
// Double-precision Gaussian fill; same even-count workaround as the
// float specialization above.
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
    const size_t n,
    const double mean,
    const double std,
    double* r,
    CUDAContext* context) {
  const size_t even_n =
      HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
  CURAND_ENFORCE(hiprandGenerateNormalDouble(
      context->curand_generator(), r, even_n, mean, std));
}
// y = dot(a, b); y is a DEVICE pointer (pointer mode DEVICE), so no host
// sync is implied by this call.
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
    const int n,
    const float* a,
    const float* b,
    float* y,
    CUDAContext* context) {
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
// fp16 dot product with fp32 accumulation (executionType HIP_R_32F);
// result is written back as fp16 to the device pointer y.
template <>
CAFFE2_CUDA_EXPORT void Dot<float16, CUDAContext>(
    const int n,
    const float16* a,
    const float16* b,
    float16* y,
    CUDAContext* context) {
  // execute with 32-bit math
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasDotEx_v2(
      context->cublas_handle(),
      n,
      a,
      HIP_R_16F,
      1,
      b,
      HIP_R_16F,
      1,
      y,
      HIP_R_16F,
      HIP_R_32F));
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
// Single-block reduction of X[0..N) into *Y (sum, or sum of squares when
// `square` is set). Accumulates in float via a 128-slot shared buffer,
// then reduces 128 -> 32 -> 1. Must be launched with exactly one block of
// SUM_KERNEL_NTHREADS threads.
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
  const int idx = threadIdx.x;
  __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
  reduction_buffer[idx] = 0;
  // A multilevel reduction.
  // N -> 128
  if (!square) {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      reduction_buffer[idx] += convert::To<T, float>(X[i]);
    }
  } else {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      float Xi = convert::To<T, float>(X[i]);
      reduction_buffer[idx] += Xi * Xi;
    }
  }
  __syncthreads();
  // 128 -> 32
  if (idx < 32) {
    reduction_buffer[idx] += reduction_buffer[idx + 32] +
        reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
  }
  __syncthreads();
  // 32 -> 1: thread 0 serially folds the remaining partials and stores.
  if (idx == 0) {
    float tmp = 0;
    for (int i = 0; i < 32; ++i) {
      tmp += reduction_buffer[i];
    }
    *Y = convert::To<float, T>(tmp);
  }
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
// Copies a single float result into `dest` as type T (one-thread kernel).
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
  *dest = convert::To<float, T>(*sum);
}
// cub::DeviceReduce-based sum over iterator `it` of length N, writing the
// result to *dest. `scratch_ptr` supplies the temp storage; when dest is
// null, one extra T is appended to the scratch and dest is pointed at it.
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
    const int N,
    IterT it,
    T*& dest,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  size_t memRequired = 0;
  // First call with nullptr only queries the required temp-storage bytes.
  hipcub::DeviceReduce::Sum(
      nullptr, memRequired, it, dest, N, context->cuda_stream());
  auto buffer_size =
      static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
  if (!dest) {
    // allocate one more T at the end of scratch for dest
    scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
    dest = scratch_ptr->template mutable_data<T>() + buffer_size;
  } else {
    scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
  }
  hipcub::DeviceReduce::Sum(
      static_cast<void*>(scratch_ptr->template mutable_data<T>()),
      memRequired,
      it,
      dest,
      N,
      context->cuda_stream());
}
} // namespace
// Sums x[0..N) into *y. Uses the cub device reduction for large N when a
// scratch tensor is provided; otherwise the single-block SumKernel.
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
    SumGenericIter<float>(N, x, y, context, scratch_ptr);
  } else {
    hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
        N, x, y, false);
  }
}
// int32 sum; same large-N cub path / small-N single-block path as the
// float specialization. Note SumKernel accumulates in float.
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
    const int N,
    const int32_t* x,
    int32_t* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
    SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
  } else {
    hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
        N, x, y, false);
  }
}
namespace {
// Unary functor converting T -> float, used with cub transform iterators.
template <typename T>
struct FloatTransform {
  inline __host__ __device__ float operator()(const T v) const {
    return convert::To<T, float>(v);
  }
};
} // namespace
// Sum<T> for types needing float conversion (instantiated for float16):
// large N reduces a float-transformed iterator via cub into a scratch
// float, then converts to T with SumConvertKernel; small N uses SumKernel.
#define CAFFE2_MATH_SUM_FUNC(T)                                           \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>(                            \
      const int N,                                                        \
      const T* x,                                                         \
      T* y,                                                               \
      CUDAContext* context,                                               \
      Tensor* scratch_ptr) {                                              \
    if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {                \
      FloatTransform<T> transform;                                        \
      hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
          x, transform);                                                  \
      float* sum = nullptr;                                               \
      SumGenericIter<float>(N, it, sum, context, scratch_ptr);            \
      hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y);    \
    } else {                                                              \
      hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
          N, x, y, false);                                                \
    }                                                                     \
  }
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
// Unary functor returning v * v, used with cub transform iterators.
template <typename T>
struct SqrTransform {
  inline __host__ __device__ T operator()(const T v) const {
    return v * v;
  }
};
} //  namespace
// Sum of squares of x[0..N) into *y: cub reduction over a squared-value
// iterator for large N, single-block SumKernel (square=true) otherwise.
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
    SqrTransform<float> transform;
    hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
        x, transform);
    SumGenericIter<float>(N, it, y, context, scratch_ptr);
  } else {
    hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
        N, x, y, true);
  }
}
// SumSqr<T> for types needing float conversion (instantiated for float16):
// stacks a float-conversion iterator under a squaring iterator, reduces
// via cub, then converts the float result back to T.
#define CAFFE2_MATH_SUMSQR_FUNC(T)                                        \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>(                         \
      const int N,                                                        \
      const T* x,                                                         \
      T* y,                                                               \
      CUDAContext* context,                                               \
      Tensor* scratch_ptr) {                                              \
    if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {                \
      FloatTransform<T> float_transform;                                  \
      hipcub::TransformInputIterator<float, FloatTransform<T>, const T*>     \
          float_it(x, float_transform);                                   \
      SqrTransform<float> sqr_transform;                                  \
      hipcub::TransformInputIterator<                                        \
          float,                                                          \
          SqrTransform<float>,                                            \
          decltype(float_it)>                                             \
          it(float_it, sqr_transform);                                    \
      float* sum = nullptr;                                               \
      SumGenericIter<float>(N, it, sum, context, scratch_ptr);            \
      hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y);    \
    } else {                                                              \
      hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
          N, x, y, true);                                                 \
    }                                                                     \
  }
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
// For each row i of the N x D matrix x, picks column idx[i]:
//   y[i] = x[i * D + idx[i]].
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = x[i * D + idx[i]];
  }
}
} // namespace
// Host wrapper: per-row column selection for float (see SelectKernel).
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
    const int N,
    const int D,
    const float* x,
    const int* idx,
    float* y,
    CUDAContext* context) {
  hipLaunchKernelGGL(( SelectKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), N, D, x, idx, y);
}
// Host wrapper: per-row column selection for float16 (see SelectKernel).
template <>
CAFFE2_CUDA_EXPORT void Select<float16, CUDAContext>(
    const int N,
    const int D,
    const float16* x,
    const int* idx,
    float16* y,
    CUDAContext* context) {
  hipLaunchKernelGGL(( SelectKernel<float16>)
      , dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), N, D, x, idx, y);
}
namespace {
// y[i] = x[i] * alpha with a host-passed scalar alpha.
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) {
  CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * static_cast<TData>(alpha);
#else
    y[i] = x[i] * static_cast<TData>(alpha);
#endif
  }
}
// y[i] = x[i] * (*alpha) with alpha read from device memory.
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) {
  CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha));
#else
    y[i] = x[i] * static_cast<TData>(*alpha);
#endif
  }
}
// y[i] = powf(x[i], exponent). Uses the float powf for all T.
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    y[i] = powf(x[i], exponent);
  }
}
} // namespace
// Element-wise power: y[i] = a[i] ** b for float data.
template <>
CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>(
    const int N,
    const float* a,
    const float b,
    float* y,
    CUDAContext* context) {
  hipLaunchKernelGGL(( PowKernel),
      dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), N, a, b, y);
}
// Scale via cuBLAS scal: copies x into y when needed, then scales y in
// place. Host-scalar overload skips the scal when alpha == 1; the
// device-pointer overload always calls scal (alpha is not readable on
// host). Instantiated for float/Sscal and double/Dscal.
#define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc)            \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                 \
      const int N,                                                           \
      const TAlpha alpha,                                                    \
      const TData* x,                                                        \
      TData* y,                                                              \
      CUDAContext* context) {                                                \
    if (N == 0) {                                                            \
      return;                                                                \
    }                                                                        \
    if (x != y) {                                                            \
      hipMemcpyAsync(                                                       \
          y,                                                                 \
          x,                                                                 \
          sizeof(TData) * N,                                                 \
          hipMemcpyDeviceToDevice,                                          \
          context->cuda_stream());                                           \
    }                                                                        \
    if (alpha != TAlpha(1)) {                                                \
      CUBLAS_ENFORCE(hipblasSetPointerMode(                                   \
          context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));              \
      CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \
    }                                                                        \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                 \
      const int N,                                                           \
      const TAlpha* alpha,                                                   \
      const TData* x,                                                        \
      TData* y,                                                              \
      CUDAContext* context) {                                                \
    if (N == 0) {                                                            \
      return;                                                                \
    }                                                                        \
    if (x != y) {                                                            \
      hipMemcpyAsync(                                                       \
          y,                                                                 \
          x,                                                                 \
          sizeof(TData) * N,                                                 \
          hipMemcpyDeviceToDevice,                                          \
          context->cuda_stream());                                           \
    }                                                                        \
    CUBLAS_ENFORCE(hipblasSetPointerMode(                                     \
        context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));              \
    CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1));    \
  }
DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, hipblasSscal)
DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, hipblasDscal)
#undef DELEGATE_CUBLAS_SCALE_FUNCTION
// Kernel-based Scale for integer types (no cuBLAS scal available):
// y = alpha * x via ScaleCUDAKernel. Host-scalar overload short-circuits
// alpha == 1 into a plain copy. Instantiated for int32/int64.
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData)         \
  template <>                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
      const int N,                                           \
      const TAlpha alpha,                                    \
      const TData* x,                                        \
      TData* y,                                              \
      CUDAContext* context) {                                \
    if (N == 0) {                                            \
      return;                                                \
    }                                                        \
    if (alpha == TAlpha(1)) {                                \
      if (x != y) {                                          \
        hipMemcpyAsync(                                     \
            y,                                               \
            x,                                               \
            sizeof(TData) * N,                               \
            hipMemcpyDeviceToDevice,                        \
            context->cuda_stream());                         \
      }                                                      \
      return;                                                \
    }                                                        \
    hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>)                          \
       , dim3(CAFFE_GET_BLOCKS(N)),                             \
           CAFFE_CUDA_NUM_THREADS,                           \
           0,                                                \
           context->cuda_stream(), N, alpha, x, y);                          \
  }                                                          \
  template <>                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
      const int N,                                           \
      const TAlpha* alpha,                                   \
      const TData* x,                                        \
      TData* y,                                              \
      CUDAContext* context) {                                \
    if (N == 0) {                                            \
      return;                                                \
    }                                                        \
    hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>)                          \
       , dim3(CAFFE_GET_BLOCKS(N)),                             \
           CAFFE_CUDA_NUM_THREADS,                           \
           0,                                                \
           context->cuda_stream(), N, alpha, x, y);                          \
  }
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
// fp16 scale with a host fp16 alpha: copy x -> y if distinct, then scale
// y in place via scalEx (fp16 data, fp32 execution). Unlike the float
// version, there is no alpha == 1 fast path here.
template <>
CAFFE2_CUDA_EXPORT void Scale<float16, float16, CUDAContext>(
    const int N,
    const float16 alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  if (N == 0) {
    return;
  }
  if (x != y) {
    hipMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        hipMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasScalEx_v2(
      context->cublas_handle(),
      N,
      &alpha,
      HIP_R_16F,
      y,
      HIP_R_16F,
      1,
      HIP_R_32F));
}
// fp16 scale with alpha read from device memory (pointer mode DEVICE);
// copy x -> y if distinct, then scale y in place via scalEx.
template <>
CAFFE2_CUDA_EXPORT void Scale<float16, float16, CUDAContext>(
    const int N,
    const float16* alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  if (N == 0) {
    return;
  }
  if (x != y) {
    hipMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        hipMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasScalEx_v2(
      context->cublas_handle(),
      N,
      alpha,
      HIP_R_16F,
      y,
      HIP_R_16F,
      1,
      HIP_R_32F));
}
// fp16 data scaled by a host fp32 alpha (alphaType HIP_R_32F). Skips the
// scal entirely when alpha == 1, leaving y as the copy of x.
template <>
CAFFE2_CUDA_EXPORT void Scale<float, float16, CUDAContext>(
    const int N,
    const float alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  if (N == 0) {
    return;
  }
  if (x != y) {
    hipMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        hipMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  if (alpha != 1.0f) {
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasScalEx_v2(
        context->cublas_handle(),
        N,
        &alpha,
        HIP_R_32F,
        y,
        HIP_R_16F,
        1,
        HIP_R_32F));
  }
}
// fp16 data scaled by an fp32 alpha read from device memory (pointer mode
// DEVICE); no alpha == 1 short-circuit is possible on the host side.
template <>
CAFFE2_CUDA_EXPORT void Scale<float, float16, CUDAContext>(
    const int N,
    const float* alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  if (N == 0) {
    return;
  }
  if (x != y) {
    hipMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        hipMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasScalEx_v2(
      context->cublas_handle(),
      N,
      alpha,
      HIP_R_32F,
      y,
      HIP_R_16F,
      1,
      HIP_R_32F));
}
// Y += alpha * X for float, host-side alpha.
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
    const int N,
    const float alpha,
    const float* X,
    float* Y,
    CUDAContext* context) {
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Y += alpha * X for double. Note the interface takes alpha as a float
// (matching the other specializations); it is widened to double for Daxpy.
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
    const int N,
    const float alpha,
    const double* X,
    double* Y,
    CUDAContext* context) {
  double alpha_d{alpha};
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(
      hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
// Y += alpha * X for fp16 data with a host fp32 alpha, executed at fp32
// precision via axpyEx.
template <>
CAFFE2_CUDA_EXPORT void Axpy<float16, CUDAContext>(
    const int N,
    const float alpha,
    const float16* X,
    float16* Y,
    CUDAContext* context) {
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasAxpyEx_v2(
      context->cublas_handle(),
      N,
      &alpha,
      HIP_R_32F,
      X,
      HIP_R_16F,
      1,
      Y,
      HIP_R_16F,
      1,
      HIP_R_32F));
}
// Y += (*alpha) * X for float with alpha in device memory.
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
    const int N,
    const float* alpha,
    const float* X,
    float* Y,
    CUDAContext* context) {
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
// Y += (*alpha) * X for fp16 data with an fp32 alpha in device memory,
// executed at fp32 precision via axpyEx.
template <>
CAFFE2_CUDA_EXPORT void Axpy<float16, CUDAContext>(
    const int N,
    const float* alpha,
    const float16* X,
    float16* Y,
    CUDAContext* context) {
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasAxpyEx_v2(
      context->cublas_handle(),
      N,
      alpha,
      HIP_R_32F,
      X,
      HIP_R_16F,
      1,
      Y,
      HIP_R_16F,
      1,
      HIP_R_32F));
}
namespace {
// y[i] = a * x[i] + b * y[i], host-passed scalar coefficients.
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
    const int N,
    const TCoeff a,
    const TData* x,
    const TCoeff b,
    TData* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * a + y[i] * b;
#else
    y[i] = x[i] * a + y[i] * b;
#endif
  }
}
// fp16-data specialization: compute in float via convert::To, store fp16.
template <>
__global__ void AxpbyCUDAKernel<float, float16>(
    const int N,
    const float a,
    const float16* x,
    const float b,
    float16* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = convert::To<float, float16>(
        convert::To<float16, float>(x[i]) * a +
        convert::To<float16, float>(y[i]) * b);
  }
}
// y[i] = (*a) * x[i] + (*b) * y[i], coefficients read from device memory.
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
    const int N,
    const TCoeff* a,
    const TData* x,
    const TCoeff* b,
    TData* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
    y[i] = x[i] * *a + y[i] * *b;
#endif
  }
}
// fp16-data, device-coefficient specialization; float compute as above.
template <>
__global__ void AxpbyCUDAKernel<float, float16>(
    const int N,
    const float* a,
    const float16* x,
    const float* b,
    float16* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    y[i] = convert::To<float, float16>(
        convert::To<float16, float>(x[i]) * __ldg(a) +
        convert::To<float16, float>(y[i]) * __ldg(b));
#else
    y[i] = convert::To<float, float16>(
        convert::To<float16, float>(x[i]) * *a +
        convert::To<float16, float>(y[i]) * *b);
#endif
  }
}
} // namespace
// Host Axpby wrappers (host-scalar and device-pointer coefficient forms)
// launching AxpbyCUDAKernel; instantiated for float and float16 data.
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData)         \
  template <>                                                \
  CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
      const int n,                                           \
      const TCoeff a,                                        \
      const TData* x,                                        \
      const TCoeff b,                                        \
      TData* y,                                              \
      CUDAContext* context) {                                \
    hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>)                          \
       , dim3(CAFFE_GET_BLOCKS(n)),                             \
           CAFFE_CUDA_NUM_THREADS,                           \
           0,                                                \
           context->cuda_stream(), n, a, x, b, y);                           \
  }                                                          \
  template <>                                                \
  CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
      const int n,                                           \
      const TCoeff* a,                                       \
      const TData* x,                                        \
      const TCoeff* b,                                       \
      TData* y,                                              \
      CUDAContext* context) {                                \
    hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>)                          \
       , dim3(CAFFE_GET_BLOCKS(n)),                             \
           CAFFE_CUDA_NUM_THREADS,                           \
           0,                                                \
           context->cuda_stream(), n, a, x, b, y);                           \
  }
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float16)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
// im2col for NCHW layout: each of the n threads fills one (channel_out,
// h_out, w_out) column entry by copying the kernel_h x kernel_w window of
// the image (with dilation), writing 0 for out-of-bounds (padding) taps.
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (channel_in, h_out, w_out).
    const int w_out = index % output_w;
    const int h_index = index / output_w;
    const int h_out = h_index % output_h;
    const int channel_in = h_index / output_h;
    const int channel_out = channel_in * kernel_h * kernel_w;
    // Top-left corner of the receptive field in image coordinates.
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    const int output_size = output_h * output_w;
    T* col_data_ptr =
        col_data + (channel_out * output_h + h_out) * output_w + w_out;
    const T* img_data_ptr =
        img_data + (channel_in * input_h + h_in) * input_w + w_in;
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data_ptr + dh * input_w + dw)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data_ptr[dh * input_w + dw]
            : 0;
#endif
        // Successive kernel taps are output_size apart in the col buffer.
        col_data_ptr += output_size;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// im2col for NHWC layout: each thread handles one (h_out, w_out,
// channel_in) entry, walking the dilated kernel window; out-of-bounds
// (padding) taps write 0. Successive taps are `channels` apart in the
// column buffer.
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_w,
    const int channels,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (h_out, w_out, channel_in).
    const int channel_in = index % channels;
    const int w_out = index / channels % output_w;
    const int h_out = index / channels / output_w;
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    T* col_data_ptr = col_data +
        (h_out * output_w + w_out) * channels * kernel_h * kernel_w +
        channel_in;
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data + (h * input_w + w) * channels + channel_in)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data[(h * input_w + w) * channels + channel_in]
            : 0;
#endif
        col_data_ptr += channels;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// col2im for NCHW layout (inverse of im2col): each thread owns one image
// pixel and sums every column entry that im2col copied from it, so no
// atomics are needed. Only (h_col, w_col) positions whose dilated kernel
// window covers this pixel contribute.
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Effective (dilated) patch extent.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    // Pixel position in padded coordinates.
    const int w = index % input_w + pad_l;
    const int h = index / input_w % input_h + pad_t;
    const int c = index / (input_h * input_w);
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = (h - h_col * stride_h);
        int w_k = (w - w_col * stride_w);
        // Only taps aligned with the dilation grid touch this pixel.
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int col_data_index =
              (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
                  output_w +
              w_col;
#if __CUDA_ARCH__ >= 350
          val += __ldg(col_data + col_data_index);
#else
          val += col_data[col_data_index];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// col2im for NHWC layout: same gather strategy as the NCHW version (one
// thread per image element, summing all contributing column entries),
// with NHWC indexing into the column buffer.
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
    const int n,
    const int input_w,
    const int channels,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Effective (dilated) patch extent.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    // Decompose the flat index into (h, w, c), in padded coordinates.
    const int c = index % channels;
    const int w = index / channels % input_w + pad_l;
    const int h = index / channels / input_w + pad_t;
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    const int channels_col = patch_h * patch_w * channels;
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = h - h_col * stride_h;
        int w_k = w - w_col * stride_w;
        // Only taps aligned with the dilation grid touch this element.
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
          val += __ldg(
              col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
          val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// N-dimensional im2col/col2im for NCHW layout. Template flag kCol2Im
// selects the direction: false scatters image values into the column
// buffer (padding -> 0); true accumulates column values back into the
// image via atomicAdd (multiple column entries may map to one pixel).
// Grid-stride over the outer (channel x kernel-offset) dimension,
// block-stride over the inner (spatial output) dimension.
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
    const int outer_size,
    const int inner_size,
    const int kernel_size,
    SimpleArray<int, N + 1> img_shape,
    SimpleArray<int, N + 1> col_shape,
    SimpleArray<int, N> kernel_shape,
    SimpleArray<int, N> stride,
    SimpleArray<int, N> dilation,
    SimpleArray<int, N> pad,
    const T* X_data,
    T* Y_data) {
  int d_offset[N];
  int d_iter[N];
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    // Decompose the outer index into per-dimension kernel offsets.
    int offset_i = i;
#pragma unroll
    for (int d_i = N - 1; d_i >= 0; --d_i) {
      d_offset[d_i] = offset_i % kernel_shape.data[d_i];
      offset_i /= kernel_shape.data[d_i];
    }
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      // Decompose the inner index into per-dimension output coordinates.
      int offset_j = j;
#pragma unroll
      for (int d_i = N - 1; d_i >= 0; --d_i) {
        d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
        offset_j /= col_shape.data[d_i + 1];
      }
      const int col_index = i * inner_size + j;
      int img_index = i / kernel_size;
      bool is_padding = false;
#pragma unroll
      for (int d_i = 0; d_i < N; ++d_i) {
        const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
            d_offset[d_i] * dilation.data[d_i];
        is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
        img_index = img_index * img_shape.data[d_i + 1] + d_img;
      }
#if __CUDA_ARCH__ >= 350
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
      } else if (!is_padding) {
        atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
      }
#else
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : X_data[img_index];
      } else if (!is_padding) {
        atomicAdd(Y_data + img_index, X_data[col_index]);
      }
#endif
    }
  }
}
// Host launcher for N-d Im2Col (NCHW): copies shape/stride/dilation/pad
// arrays into by-value SimpleArrays (kernel args cannot be host pointers)
// and launches the fused kernel in Im2Col mode (kCol2Im = false).
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context) {
  // One block per (channel x kernel-offset) slice of the column buffer.
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  SimpleArray<int, N + 1> img_shape_array;
  SimpleArray<int, N + 1> col_shape_array;
  SimpleArray<int, N> kernel_shape_array;
  SimpleArray<int, N> stride_array;
  SimpleArray<int, N> dilation_array;
  SimpleArray<int, N> pad_array;
  std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
  std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
  std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
  std::memcpy(stride_array.data, stride, N * sizeof(int));
  std::memcpy(dilation_array.data, dilation, N * sizeof(int));
  std::memcpy(pad_array.data, pad, N * sizeof(int));
  hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>)
      , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      outer_size,
      inner_size,
      kernel_size,
      img_shape_array,
      col_shape_array,
      kernel_shape_array,
      stride_array,
      dilation_array,
      pad_array,
      img_data,
      col_data);
}
// Host launcher for N-d Col2Im (NCHW). The image buffer is zeroed first
// because the kernel accumulates contributions with atomicAdd
// (kCol2Im = true).
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context) {
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  SimpleArray<int, N + 1> img_shape_array;
  SimpleArray<int, N + 1> col_shape_array;
  SimpleArray<int, N> kernel_shape_array;
  SimpleArray<int, N> stride_array;
  SimpleArray<int, N> dilation_array;
  SimpleArray<int, N> pad_array;
  std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
  std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
  std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
  std::memcpy(stride_array.data, stride, N * sizeof(int));
  std::memcpy(dilation_array.data, dilation, N * sizeof(int));
  std::memcpy(pad_array.data, pad, N * sizeof(int));
  // Pre-zero the output: the kernel only adds into it.
  Set<T, CUDAContext>(img_size, 0, img_data, context);
  hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>)
      , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      outer_size,
      inner_size,
      kernel_size,
      img_shape_array,
      col_shape_array,
      kernel_shape_array,
      stride_array,
      dilation_array,
      pad_array,
      col_data,
      img_data);
}
} // namespace
// Im2Col for float, NCHW layout: computes the output spatial dims from the
// padded, dilated convolution geometry and launches one thread per column
// element (channels * output_h * output_w).
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int /* groups */) {
  // Effective (dilated) kernel extent.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = channels * output_h * output_w;
  hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      height,
      width,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_h,
      output_w,
      img_data,
      col_data);
}
// Im2Col for float, NHWC layout. Grouped convolution is not supported on
// this path (enforced below). Note the NHWC kernel takes (output_w,
// channels) in place of the NCHW kernel's (output_h, output_w).
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
  // Effective (dilated) kernel extent.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = output_h * output_w * channels;
  hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      height,
      width,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_w,
      channels,
      img_data,
      col_data);
}
// Col2Im for float, NCHW layout: one thread per image element
// (channels * height * width); each thread gathers its contributions from
// the column buffer (no atomics needed).
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int /* groups */) {
  // Effective (dilated) kernel extent.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = channels * height * width;
  hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      height,
      width,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_h,
      output_w,
      col_data,
      img_data);
}
// Col2Im for float, NHWC layout: one thread per image element
// (height * width * channels). Grouped convolution is not supported on this
// path (enforced below).
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
  // Effective (dilated) kernel extent.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = height * width * channels;
  hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      width,
      channels,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_h,
      output_w,
      col_data,
      img_data);
}
// N-dimensional Im2Col entry point (float, NCHW): dispatches on the runtime
// rank N to the compile-time-templated Im2ColNdNCHWCUDAImpl.
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context) {
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      N,
      Im2ColNdNCHWCUDAImpl,
      float,
      img_size,
      col_size,
      img_shape,
      col_shape,
      kernel_shape,
      stride,
      dilation,
      pad,
      img_data,
      col_data,
      context);
}
// N-dimensional Col2Im entry point (float, NCHW): dispatches on the runtime
// rank N to the compile-time-templated Col2ImNdNCHWCUDAImpl.
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context) {
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      N,
      Col2ImNdNCHWCUDAImpl,
      float,
      img_size,
      col_size,
      img_shape,
      col_shape,
      kernel_shape,
      stride,
      dilation,
      pad,
      col_data,
      img_data,
      context);
}
// Copies an M x N matrix of `itemsize`-byte elements from A (row stride
// lda elements) to B (row stride ldb elements) with an async strided 2D
// device copy on the context's stream. `copy` must be null: typed
// (non-POD) copies are not supported in the CUDA context.
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
    const size_t itemsize,
    const int M,
    const int N,
    const void* A,
    const int lda,
    void* B,
    const int ldb,
    CUDAContext* context,
    TypeMeta::TypedCopy copy) {
  CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
  if (M == 0 || N == 0) {
    // Nothing to copy. Also keeps degenerate width/pitch combinations away
    // from the 2D-copy driver call, matching the guard in the typed
    // CopyMatrix specializations below.
    return;
  }
  hipMemcpy2DAsync(
      B,
      ldb * itemsize,
      A,
      lda * itemsize,
      N * itemsize,
      M,
      hipMemcpyDeviceToDevice,
      context->cuda_stream());
}
// Typed CopyMatrix specializations: async strided 2D device-to-device copy
// of an M x N matrix of T with row strides lda/ldb (in elements). Empty
// matrices are a no-op, which also keeps degenerate pitches away from the
// driver. (Comments must stay outside the macro body: a trailing '\' after
// a '//' comment would swallow the next line.)
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
  template <>                                  \
  void CopyMatrix<T, CUDAContext>(             \
      const int M,                             \
      const int N,                             \
      const T* A,                              \
      const int lda,                           \
      T* B,                                    \
      const int ldb,                           \
      CUDAContext* context) {                  \
    if (M == 0 || N == 0) {                    \
      return;                                  \
    }                                          \
    hipMemcpy2DAsync(                          \
        B,                                     \
        sizeof(T) * ldb,                       \
        A,                                     \
        sizeof(T) * lda,                       \
        sizeof(T) * N,                         \
        M,                                     \
        hipMemcpyDeviceToDevice,               \
        context->cuda_stream());               \
  }
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
// Copies N floats device-to-device on the context's stream.
// No-op when the buffers alias or there is nothing to copy.
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
    const int N,
    const float* src,
    float* dst,
    CUDAContext* context) {
  if (N <= 0 || src == dst) {
    return;
  }
  const size_t nbytes = sizeof(float) * N;
  hipMemcpyAsync(
      dst, src, nbytes, hipMemcpyDeviceToDevice, context->cuda_stream());
}
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
// Reduces each row of a rows x cols matrix: Y[i] = reduce(X[i, :]) * alpha.
// One block per row (grid-stride over rows); threads cooperate via a CUB
// block reduction. Only thread 0 holds the final value.
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
    const int rows,
    const int cols,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < rows; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < cols; j += blockDim.x) {
      val = reducer(X[i * cols + j], val);
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // Barrier before temp_storage is reused for the next row.
    __syncthreads();
  }
}
// Reduces each column of a rows x cols matrix: Y[i] = reduce(X[:, i]) * alpha.
// One block per column (grid-stride over cols); threads stride down the
// column and combine via a CUB block reduction.
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
    const int rows,
    const int cols,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < cols; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < rows; j += blockDim.x) {
      val = reducer(X[j * cols + i], val);
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // Barrier before temp_storage is reused for the next column.
    __syncthreads();
  }
}
} // namespace
// RowwiseMax: y[i] = max over the D columns of row i of an N x D matrix.
// Grid sized by N (one block per row, capped at the block limit).
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T)                            \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>(                     \
      const int N, const int D, const T* x, T* y, CUDAContext* context) { \
    hipLaunchKernelGGL(( RowwiseReduceKernel),                            \
        ::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),                               \
        CAFFE_CUDA_NUM_THREADS,                                           \
        0,                                                                \
        context->cuda_stream(),                                           \
        N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
  }
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
// ColwiseMax: y[i] = max over the N rows of column i of an N x D matrix.
// Grid sized by D (one block per column, capped at the block limit).
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T)                            \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>(                     \
      const int N, const int D, const T* x, T* y, CUDAContext* context) { \
    hipLaunchKernelGGL(( ColwiseReduceKernel),                            \
        ::min(D, CAFFE_MAXIMUM_NUM_BLOCKS),                               \
        CAFFE_CUDA_NUM_THREADS,                                           \
        0,                                                                \
        context->cuda_stream(),                                           \
        N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
  }
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
// Elementwise clamp from below: y[i] = max(x[i], alpha).
// fmaxf is kept (rather than a comparison) to preserve its NaN semantics.
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
  CUDA_1D_KERNEL_LOOP(index, N) {
    y[index] = fmaxf(x[index], alpha);
  }
}
} // namespace
// Host wrapper for maximum_kernel: y[i] = max(x[i], alpha) for i in [0, N).
// Grid is min(N, max blocks); the kernel's grid-stride loop covers the rest.
template <>
CAFFE2_CUDA_EXPORT void Maximum(
    const int N,
    const float alpha,
    const float* x,
    float* y,
    CUDAContext* context) {
  hipLaunchKernelGGL(( maximum_kernel),
      dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), N, alpha, x, y);
}
namespace {
template <typename T, class Reducer, int D>
// Generic strided tensor reduction over a transposed view: block i reduces
// the `inner_size` elements belonging to output element i. The transposed
// flat index is mapped back to X's layout via FixedDivisor div/mod against
// Y_dims and the precomputed X_strides.
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
    const int outer_size,
    const int inner_size,
    SimpleArray<int, D> X_strides,
    SimpleArray<FixedDivisor<int>, D> Y_dims,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      // Convert the transposed index back to an X offset.
      int X_index = 0;
      int Y_index = i * inner_size + j;
#pragma unroll
      for (int d = D - 1; d >= 0; --d) {
        int r;
        Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
        X_index += r * X_strides.data[d];
      }
#if __CUDA_ARCH__ >= 350
      val = reducer(val, __ldg(X + X_index));
#else
      val = reducer(val, X[X_index]);
#endif
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // Barrier before temp_storage is reused in the next iteration.
    __syncthreads();
  }
}
// Launches the generic D-dimensional reduction: computes transposed strides
// for X and FixedDivisors for the transposed dims, then runs one block per
// output element (capped at the block limit).
template <typename T, class Reducer, int D>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl(
    const int outer_size,
    const int inner_size,
    const int* dims,
    const int* axes,
    const Reducer& reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides;
  SimpleArray<FixedDivisor<int>, D> Y_dims;
  utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
  for (int i = 0; i < D; ++i) {
    Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
  }
  hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>)
      , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      outer_size,
      inner_size,
      X_strides,
      Y_dims,
      reducer,
      init,
      alpha,
      X,
      Y);
}
// Reduces X over the given axes into Y, scaling by alpha.
// Fast paths: empty input (fill with alpha * init), alpha == 0 (fill with
// zero), identity reduction (scale-copy), and rowwise/colwise 2D layouts.
// Otherwise falls back to the generic transposed reduction dispatched on
// the tensor rank.
template <typename T, class Reducer>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDA(
    const int num_dims,
    const int* dims,
    const int num_axes,
    const int* axes,
    const Reducer& reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y,
    CUDAContext* context) {
  CAFFE_ENFORCE_LE(num_axes, num_dims);
  // Output dims: reduced axes collapse to 1.
  std::vector<int> Y_dims_vector(dims, dims + num_dims);
  for (int i = 0; i < num_axes; ++i) {
    Y_dims_vector[axes[i]] = 1;
  }
  const int* X_dims = dims;
  const int* Y_dims = Y_dims_vector.data();
  const int X_size =
      std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
  const int Y_size =
      std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
  if (X_size == 0) {
    // Empty input: every output is the (scaled) identity element.
    Set<T, CUDAContext>(Y_size, alpha * init, Y, context);
    return;
  }
  if (alpha == T(0)) {
    Set<T, CUDAContext>(Y_size, T(0), Y, context);
    return;
  }
  if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
    // Nothing is reduced; just scale.
    Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
    return;
  }
  int rows;
  int cols;
  if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    hipLaunchKernelGGL(( RowwiseReduceKernel<T>)
        , dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
           dim3(CAFFE_CUDA_NUM_THREADS),
           0,
           context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y);
    return;
  }
  if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    // ColwiseReduceKernel assigns blocks to columns, so the grid is sized
    // by cols (was rows, which mismatched the kernel's outer loop; cf. the
    // ColwiseMax specialization above, which grids by D).
    hipLaunchKernelGGL(( ColwiseReduceKernel<T>)
        , dim3(::min(cols, CAFFE_MAXIMUM_NUM_BLOCKS)),
           dim3(CAFFE_CUDA_NUM_THREADS),
           0,
           context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y);
    return;
  }
  // General case: move reduced axes to the end, then reduce the trailing
  // inner_size elements per output element.
  std::vector<int> transpose_axes(num_dims);
  utils::ComputeTransposeAxesForReduceOp(
      num_dims, num_axes, axes, transpose_axes.data());
  const int outer_size = Y_size;
  const int inner_size = X_size / Y_size;
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
      num_dims,
      ReduceTensorCUDAImpl,
      T,
      Reducer,
      outer_size,
      inner_size,
      dims,
      transpose_axes.data(),
      reducer,
      init,
      alpha,
      X,
      Y,
      context);
}
} // namespace
// ReduceMin: min over the given axes, identity = numeric max.
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T)        \
  template <>                                        \
  CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \
      const int num_dims,                            \
      const int* dims,                               \
      const int num_axes,                            \
      const int* axes,                               \
      const T alpha,                                 \
      const T* X,                                    \
      T* Y,                                          \
      CUDAContext* context) {                        \
    ReduceTensorCUDA(                                \
        num_dims,                                    \
        dims,                                        \
        num_axes,                                    \
        axes,                                        \
        hipcub::Min(),                               \
        std::numeric_limits<T>::max(),               \
        alpha,                                       \
        X,                                           \
        Y,                                           \
        context);                                    \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
// ReduceMax: max over the given axes, identity = numeric lowest.
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T)        \
  template <>                                        \
  CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \
      const int num_dims,                            \
      const int* dims,                               \
      const int num_axes,                            \
      const int* axes,                               \
      const T alpha,                                 \
      const T* X,                                    \
      T* Y,                                          \
      CUDAContext* context) {                        \
    ReduceTensorCUDA(                                \
        num_dims,                                    \
        dims,                                        \
        num_axes,                                    \
        axes,                                        \
        hipcub::Max(),                               \
        std::numeric_limits<T>::lowest(),            \
        alpha,                                       \
        X,                                           \
        Y,                                           \
        context);                                    \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
// ReduceSum: sum over the given axes, identity = 0.
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T)        \
  template <>                                        \
  CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \
      const int num_dims,                            \
      const int* dims,                               \
      const int num_axes,                            \
      const int* axes,                               \
      const T alpha,                                 \
      const T* X,                                    \
      T* Y,                                          \
      CUDAContext* context) {                        \
    ReduceTensorCUDA(                                \
        num_dims,                                    \
        dims,                                        \
        num_axes,                                    \
        axes,                                        \
        hipcub::Sum(),                               \
        T(0),                                        \
        alpha,                                       \
        X,                                           \
        Y,                                           \
        context);                                    \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
// ReduceMean: sum over the given axes scaled by 1 / (product of reduced
// dims), folded into alpha so no extra pass is needed.
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T)        \
  template <>                                         \
  CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
      const int num_dims,                             \
      const int* dims,                                \
      const int num_axes,                             \
      const int* axes,                                \
      const T alpha,                                  \
      const T* X,                                     \
      T* Y,                                           \
      CUDAContext* context) {                         \
    int scale = 1;                                    \
    for (int i = 0; i < num_axes; ++i) {              \
      scale *= dims[axes[i]];                         \
    }                                                 \
    ReduceTensorCUDA(                                 \
        num_dims,                                     \
        dims,                                         \
        num_axes,                                     \
        axes,                                         \
        hipcub::Sum(),                                \
        T(0),                                         \
        alpha / static_cast<T>(scale),                \
        X,                                            \
        Y,                                            \
        context);                                     \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
// Broadcast copy with scaling: Y[y] = X[map(y)] * alpha, where map() walks
// the D output dims via FixedDivisor div/mod and applies X_strides.
// Broadcast dimensions carry a stride of 0 (set up by the host launcher).
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
    const int Y_size,
    const SimpleArray<int, D> X_strides,
    const SimpleArray<FixedDivisor<int>, D> Y_dims,
    const T alpha,
    const T* X,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
    int X_index = 0;
    int Y_index_val = Y_index;
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
      X_index += d * X_strides.data[i];
    }
#if __CUDA_ARCH__ >= 350
    Y[Y_index] = __ldg(X + X_index) * alpha;
#else
    Y[Y_index] = X[X_index] * alpha;
#endif
  }
}
// Host launcher for Broadcast: right-aligns X's dims against Y's D dims
// (numpy-style), gives broadcast (size-1 or missing) dims a stride of 0,
// and validates dim compatibility. Bails out early on any zero-sized
// output dimension.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
    const int X_ndim,
    const int* X_dims,
    const int* Y_dims,
    const T alpha,
    const T* X,
    T* Y,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides_array;
  SimpleArray<FixedDivisor<int>, D> Y_dims_array;
  // Leading dims missing from X broadcast with stride 0.
  const int d = D - X_ndim;
  std::fill(X_strides_array.data, X_strides_array.data + d, 0);
  int cur_stride = 1;
  for (int i = D - 1; i >= d; --i) {
    CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
    X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
    cur_stride *= X_dims[i - d];
  }
  for (int i = 0; i < D; ++i) {
    if (Y_dims[i] == 0) {
      // Empty output; FixedDivisor cannot represent 0 anyway.
      return;
    }
    Y_dims_array.data[i] = FixedDivisor<int>(Y_dims[i]);
  }
  const int Y_size =
      std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
  hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
      , dim3(CAFFE_GET_BLOCKS(Y_size)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
} // namespace
// Broadcast entry point: dispatches on the output rank Y_ndim to the
// compile-time-templated BroadcastCUDAImpl. Requires X_ndim <= Y_ndim.
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T)          \
  template <>                                         \
  CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>(  \
      const int X_ndim,                               \
      const int* X_dims,                              \
      const int Y_ndim,                               \
      const int* Y_dims,                              \
      const T alpha,                                  \
      const T* X,                                     \
      T* Y,                                           \
      CUDAContext* context) {                         \
    CAFFE_ENFORCE_LE(X_ndim, Y_ndim);                 \
    DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(           \
        Y_ndim,                                       \
        BroadcastCUDAImpl,                            \
        T,                                            \
        X_ndim,                                       \
        X_dims,                                       \
        Y_dims,                                       \
        alpha,                                        \
        X,                                            \
        Y,                                            \
        context);                                     \
  }
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
// Per-row mean and variance of a rows x cols matrix, one block per row.
// Uses the one-pass formula var = E[x^2] - mu^2 via two CUB block sums.
// NOTE(review): this formula can return a slightly negative variance due
// to rounding.
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
    const int rows,
    const int cols,
    const T* X,
    T* mean,
    T* variance) {
  __shared__ typename BlockReduce<T>::TempStorage m_storage;
  __shared__ typename BlockReduce<T>::TempStorage v_storage;
  const T scale = T(1) / static_cast<T>(cols);
  for (int i = blockIdx.x; i < rows; i += gridDim.x) {
    T m_val = 0;
    T v_val = 0;
    for (int j = threadIdx.x; j < cols; j += blockDim.x) {
      const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
      m_val += __ldg(X + X_index);
      v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
      m_val += X[X_index];
      v_val += X[X_index] * X[X_index];
#endif
    }
    m_val = BlockReduce<T>(m_storage).Sum(m_val);
    v_val = BlockReduce<T>(v_storage).Sum(v_val);
    if (threadIdx.x == 0) {
      const T mu = m_val * scale;
      mean[i] = mu;
      variance[i] = v_val * scale - mu * mu;
    }
    // Barrier before the shared temp storage is reused for the next row.
    __syncthreads();
  }
}
// Per-column mean and variance of a rows x cols matrix, one block per
// column. Same one-pass E[x^2] - mu^2 formulation as the rowwise kernel.
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
    const int rows,
    const int cols,
    const T* X,
    T* mean,
    T* variance) {
  __shared__ typename BlockReduce<T>::TempStorage m_storage;
  __shared__ typename BlockReduce<T>::TempStorage v_storage;
  const T scale = T(1) / static_cast<T>(rows);
  for (int i = blockIdx.x; i < cols; i += gridDim.x) {
    T m_val = 0;
    T v_val = 0;
    for (int j = threadIdx.x; j < rows; j += blockDim.x) {
      const int X_index = j * cols + i;
#if __CUDA_ARCH__ >= 350
      m_val += __ldg(X + X_index);
      v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
      m_val += X[X_index];
      v_val += X[X_index] * X[X_index];
#endif
    }
    m_val = BlockReduce<T>(m_storage).Sum(m_val);
    v_val = BlockReduce<T>(v_storage).Sum(v_val);
    if (threadIdx.x == 0) {
      const T mu = m_val * scale;
      mean[i] = mu;
      variance[i] = v_val * scale - mu * mu;
    }
    // Barrier before the shared temp storage is reused for the next column.
    __syncthreads();
  }
}
// Generic strided moments kernel over a transposed view: block i computes
// mean/variance of the inner_size elements of output element i, mapping
// indices back to X via FixedDivisor div/mod and X_strides (same scheme as
// ReduceTensorCUDAKernel).
template <typename T, int D>
__global__ void MomentsCUDAKernel(
    const int outer_size,
    const int inner_size,
    SimpleArray<int, D> X_strides,
    SimpleArray<FixedDivisor<int>, D> Y_dims,
    const T* X,
    T* mean,
    T* variance) {
  __shared__ typename BlockReduce<T>::TempStorage m_storage;
  __shared__ typename BlockReduce<T>::TempStorage v_storage;
  const T scale = T(1) / static_cast<T>(inner_size);
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    T m_val = 0;
    T v_val = 0;
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      // Convert the transposed index back to an X offset.
      int X_index = 0;
      int Y_index = i * inner_size + j;
#pragma unroll
      for (int d = D - 1; d >= 0; --d) {
        int r;
        Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
        X_index += r * X_strides.data[d];
      }
#if __CUDA_ARCH__ >= 350
      m_val += __ldg(X + X_index);
      v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
      m_val += X[X_index];
      v_val += X[X_index] * X[X_index];
#endif
    }
    m_val = BlockReduce<T>(m_storage).Sum(m_val);
    v_val = BlockReduce<T>(v_storage).Sum(v_val);
    if (threadIdx.x == 0) {
      const T mu = m_val * scale;
      mean[i] = mu;
      variance[i] = v_val * scale - mu * mu;
    }
    // Barrier before the shared temp storage is reused.
    __syncthreads();
  }
}
// Launches the generic D-dimensional moments kernel: one block per output
// element (capped at the block limit), with transposed X strides and
// FixedDivisors for the transposed dims.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void MomentsCUDAImpl(
    const int outer_size,
    const int inner_size,
    const int* dims,
    const int* axes,
    const T* X,
    T* mean,
    T* variance,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides;
  SimpleArray<FixedDivisor<int>, D> Y_dims;
  utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
  for (int i = 0; i < D; ++i) {
    Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
  }
  hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>)
      , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
// Computes mean and variance of X over the given axes.
// Fast paths: empty input (zero-fill), identity reduction (mean = X,
// variance = 0), and rowwise/colwise 2D layouts. Otherwise dispatches the
// generic transposed moments kernel on the tensor rank.
template <typename T>
CAFFE2_CUDA_EXPORT void MomentsCUDA(
    const int num_dims,
    const int* dims,
    const int num_axes,
    const int* axes,
    const T* X,
    T* mean,
    T* variance,
    CUDAContext* context) {
  CAFFE_ENFORCE_LE(num_axes, num_dims);
  // Output dims: reduced axes collapse to 1.
  std::vector<int> Y_dims_vector(dims, dims + num_dims);
  for (int i = 0; i < num_axes; ++i) {
    Y_dims_vector[axes[i]] = 1;
  }
  const int* X_dims = dims;
  const int* Y_dims = Y_dims_vector.data();
  const int X_size =
      std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
  const int Y_size =
      std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
  if (X_size == 0) {
    Set<T, CUDAContext>(Y_size, T(0), mean, context);
    Set<T, CUDAContext>(Y_size, T(0), variance, context);
    return;
  }
  if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
    // Nothing reduced: each element is its own mean with zero variance.
    hipMemcpyAsync(
        mean,
        X,
        sizeof(T) * X_size,
        hipMemcpyDeviceToDevice,
        context->cuda_stream());
    Set<T, CUDAContext>(Y_size, T(0), variance, context);
    return;
  }
  int rows;
  int cols;
  if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
        , dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
           dim3(CAFFE_CUDA_NUM_THREADS),
           0,
           context->cuda_stream(), rows, cols, X, mean, variance);
    return;
  }
  if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    // ColwiseMomentsCUDAKernel assigns blocks to columns, so the grid is
    // sized by cols (was rows, which mismatched the kernel's outer loop).
    hipLaunchKernelGGL(( ColwiseMomentsCUDAKernel<T>)
        , dim3(::min(cols, CAFFE_MAXIMUM_NUM_BLOCKS)),
           dim3(CAFFE_CUDA_NUM_THREADS),
           0,
           context->cuda_stream(), rows, cols, X, mean, variance);
    return;
  }
  // General case: move reduced axes to the end; reduce the trailing
  // inner_size elements of each of the outer_size output elements.
  std::vector<int> transpose_axes(num_dims);
  utils::ComputeTransposeAxesForReduceOp(
      num_dims, num_axes, axes, transpose_axes.data());
  const int pivot = num_dims - num_axes;
  int outer_size = 1;
  for (int i = 0; i < pivot; ++i) {
    outer_size *= dims[transpose_axes[i]];
  }
  int inner_size = 1;
  for (int i = pivot; i < num_dims; ++i) {
    inner_size *= dims[transpose_axes[i]];
  }
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      num_dims,
      MomentsCUDAImpl,
      T,
      outer_size,
      inner_size,
      dims,
      transpose_axes.data(),
      X,
      mean,
      variance,
      context);
}
} // namespace
// Public Moments entry point: thin forwarder to MomentsCUDA.
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T)                            \
  template <>                                                         \
  CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>(                    \
      const int num_dims,                                             \
      const int* dims,                                                \
      const int num_axes,                                             \
      const int* axes,                                                \
      const T* X,                                                     \
      T* mean,                                                        \
      T* variance,                                                    \
      CUDAContext* context) {                                         \
    MomentsCUDA<T>(                                                   \
        num_dims, dims, num_axes, axes, X, mean, variance, context);  \
  }
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
// InvStd kernel: inv_std[i] = 1 / sqrt(var[i] + epsilon). Only declared
// generically; per-type definitions are generated below so each type can
// use its native reciprocal-sqrt function.
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
// Generates the specialization for type T using Func as the rsqrt.
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func)               \
  template <>                                                   \
  __global__ void InvStdCUDAKernel<T>(                          \
      const int N, const T epsilon, const T* var, T* inv_std) { \
    CUDA_1D_KERNEL_LOOP(i, N) {                                 \
      inv_std[i] = Func(var[i] + epsilon);                      \
    }                                                           \
  }
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
// Host InvStd entry point: launches InvStdCUDAKernel over N elements.
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T)                 \
  template <>                                              \
  CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>(          \
      const int N,                                         \
      const T epsilon,                                     \
      const T* var,                                        \
      T* inv_std,                                          \
      CUDAContext* context) {                              \
    hipLaunchKernelGGL(( InvStdCUDAKernel<T>)              \
        , dim3(CAFFE_GET_BLOCKS(N)),                       \
           CAFFE_CUDA_NUM_THREADS,                         \
           0,                                              \
           context->cuda_stream(), N, epsilon, var, inv_std); \
  }
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
namespace {
// Gather-style transpose: for each output index, decompose it over the
// permuted dims (FixedDivisor div/mod) and read X through the transposed
// strides, so every output write is coalesced.
template <typename T, int D>
__global__ void TransposeCUDAKernel(
    const int size,
    const SimpleArray<int, D> X_strides,
    const SimpleArray<FixedDivisor<int>, D> Y_dims,
    const T* X,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(Y_index, size) {
    int X_index = 0;
    int Y_index_val = Y_index;
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
      X_index += d * X_strides.data[i];
    }
#if __CUDA_ARCH__ >= 350
    Y[Y_index] = __ldg(X + X_index);
#else
    Y[Y_index] = X[X_index];
#endif
  }
}
// Host launcher for the D-dimensional transpose: precomputes transposed
// strides and FixedDivisors for the permuted dims, then launches one
// thread per element.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void TransposeCUDAImpl(
    const int* dims,
    const int* axes,
    const T* X,
    T* Y,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides;
  SimpleArray<FixedDivisor<int>, D> Y_dims;
  utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
  int size = 1;
  for (int i = 0; i < D; ++i) {
    Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
    size *= dims[i];
  }
  hipLaunchKernelGGL(( TransposeCUDAKernel<T, D>)
      , dim3(CAFFE_GET_BLOCKS(size)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(), size, X_strides, Y_dims, X, Y);
}
} // namespace
// Transpose entry point: identity permutations degenerate to a plain copy;
// otherwise dispatch on the rank to TransposeCUDAImpl.
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T)                                 \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>(                         \
      const int ndim,                                                        \
      const int* dims,                                                       \
      const int* axes,                                                       \
      const T* X,                                                            \
      T* Y,                                                                  \
      CUDAContext* context) {                                                \
    if (utils::IsIdentityPermutation(ndim, axes)) {                          \
      const int size =                                                       \
          std::accumulate(dims, dims + ndim, 1, std::multiplies<int>());     \
      context->template Copy<T, CUDAContext, CUDAContext>(size, X, Y);       \
      return;                                                                \
    }                                                                        \
    DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(                                  \
        ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context);              \
  }
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
namespace {
// Per-channel affine transform: Y[i] = scale[c] * X[i] + bias[c], with the
// channel index derived from the flat index by layout:
// NCHW -> i / HxW % C, NHWC -> i % C.
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelCUDAKernel(
    const int size,
    const int C,
    const int HxW,
    const T* X,
    const T* scale,
    const T* bias,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(i, size) {
    const int c = kOrder == StorageOrder::NCHW ? i / HxW % C : i % C;
#if __CUDA_ARCH__ >= 350
    Y[i] = __ldg(scale + c) * __ldg(X + i) + __ldg(bias + c);
#else
    Y[i] = scale[c] * X[i] + bias[c];
#endif
  }
}
} // namespace
// AffineChannel entry point: one thread per element over N * C * HxW.
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T, kOrder)             \
  template <>                                                         \
  CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, kOrder>(      \
      const int N,                                                    \
      const int C,                                                    \
      const int HxW,                                                  \
      const T* X,                                                     \
      const T* scale,                                                 \
      const T* bias,                                                  \
      T* Y,                                                           \
      CUDAContext* context) {                                         \
    const int size = N * C * HxW;                                     \
    hipLaunchKernelGGL(( AffineChannelCUDAKernel<T, kOrder>)          \
        , dim3(CAFFE_GET_BLOCKS(size)),                               \
           CAFFE_CUDA_NUM_THREADS,                                    \
           0,                                                         \
           context->cuda_stream(), size, C, HxW, X, scale, bias, Y);  \
  }
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NCHW)
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NHWC)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
} // namespace math
} // namespace caffe2
| f0c3f732f99446195ede43da45c4c6e874aca5c3.cu | // Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
// Generates a host+device functor Func##Functor applying the binary operator
// `expr` elementwise, with a float16 specialization that converts operands to
// float, applies the op, and converts the result back to float16.
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
  template <typename T>                                        \
  struct Func##Functor {                                       \
    inline __host__ __device__ T                               \
    operator()(const T& lhs, const T& rhs) const {             \
      return lhs expr rhs;                                     \
    }                                                          \
  };                                                           \
  template <>                                                  \
  struct Func##Functor<float16> {                              \
    inline __host__ __device__ float16                         \
    operator()(const float16& lhs, const float16& rhs) const { \
      return convert::To<float, float16>(convert::To<float16, float>( \
          lhs) expr convert::To<float16, float>(rhs));         \
    }                                                          \
  };
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
// Computes S[i] = sin(X[i]) and C[i] = cos(X[i]) in a single sincos call.
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
  CUDA_1D_KERNEL_LOOP(idx, N) {
#if __CUDA_ARCH__ >= 350
    // Read-only cache load on SM35+.
    const T x_val = __ldg(X + idx);
#else
    const T x_val = X[idx];
#endif
    sincos(x_val, S + idx, C + idx);
  }
}
// Elementwise binary op with no broadcasting: C[i] = op(A[i], B[i]).
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
    const int N,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(idx, N) {
    C[idx] = op(A[idx], B[idx]);
  }
}
// Binary op with rowwise broadcasting: one operand is a length-`cols` vector
// broadcast across every row of the other (rows x cols) operand. When
// broadcast_1st is true, A is the broadcast vector; otherwise B is.
// NOTE(review): "Kenel" is a long-standing typo for "Kernel"; the spelling is
// kept because call sites in this file reference it.
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
    const int size,
    const FixedDivisor<int> cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(out_index, size) {
    // Column within the row; the broadcast operand is indexed by column only.
    const int col = cols.Mod(out_index);
    const int lhs_index = broadcast_1st ? col : out_index;
    const int rhs_index = broadcast_1st ? out_index : col;
    C[out_index] = op(A[lhs_index], B[rhs_index]);
  }
}
// Binary op with colwise broadcasting: one operand is a length-`rows` vector
// broadcast across every column of the other (rows x cols) operand. When
// broadcast_1st is true, A is the broadcast vector; otherwise B is.
// NOTE(review): "Kenel" typo kept for consistency with call sites.
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
    const int size,
    const FixedDivisor<int> cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(out_index, size) {
    // Row index; the broadcast operand is indexed by row only.
    const int row = cols.Div(out_index);
    const int lhs_index = broadcast_1st ? row : out_index;
    const int rhs_index = broadcast_1st ? out_index : row;
    C[out_index] = op(A[lhs_index], B[rhs_index]);
  }
}
// Fully general D-dimensional broadcast binary op. For each output element,
// the flat C index is decomposed into per-dimension coordinates (via
// FixedDivisor::DivMod, innermost dimension first), and A/B offsets are
// accumulated from per-tensor strides (stride 0 for broadcast dimensions).
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
    const int size,
    const SimpleArray<int, D> A_strides,
    const SimpleArray<int, D> B_strides,
    const SimpleArray<FixedDivisor<int>, D> C_dims,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    int A_index = 0;
    int B_index = 0;
    int C_index_val = C_index;
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      // Peel off the i-th coordinate of the output index.
      C_dims.data[i].DivMod(C_index_val, &C_index_val, &d);
      A_index += d * A_strides.data[i];
      B_index += d * B_strides.data[i];
    }
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Launches a binary op whose broadcasting collapses to a 2-D (rows x cols)
// pattern. rowwise_broadcast selects the rowwise vs colwise kernel;
// broadcast_1st selects whether A or B is the broadcast (vector) operand.
// No-op when either dimension is zero.
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
    const int rows,
    const int cols,
    const bool rowwise_broadcast,
    const bool broadcast_1st,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  if (rows == 0 || cols == 0) {
    // Empty output: nothing to launch.
    return;
  }
  const int size = rows * cols;
  const FixedDivisor<int> cols_div(cols);
  if (rowwise_broadcast) {
    if (broadcast_1st) {
      RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
    } else {
      RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
    }
  } else {
    if (broadcast_1st) {
      ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
    } else {
      ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
    }
  }
}
// Prepares strides/divisors for the general D-dimensional broadcast kernel
// and launches it. Dimensions of extent 1 get stride 0, which implements the
// broadcast in BroadcastBinaryOpCUDAKernel. A/B/C dims must already be
// aligned to the same rank D (see BroadcastBinaryOp below).
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
    const int* A_dims,
    const int* B_dims,
    const int* C_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  SimpleArray<int, D> A_strides_array;
  SimpleArray<int, D> B_strides_array;
  SimpleArray<FixedDivisor<int>, D> C_dims_array;
  int A_stride = 1;
  int B_stride = 1;
  for (int i = D - 1; i >= 0; --i) {
    if (C_dims[i] == 0) {
      // Empty output tensor: nothing to compute.
      return;
    }
    // Broadcast dimensions (extent 1) contribute stride 0.
    A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
    B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
    A_stride *= A_dims[i];
    B_stride *= B_dims[i];
    C_dims_array.data[i] = FixedDivisor<int>(C_dims[i]);
  }
  const int size =
      std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
  BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>
      <<<CAFFE_GET_BLOCKS(size),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
// Top-level broadcast binary op dispatcher:
//  1) aligns A and B dims to a common rank,
//  2) identical shapes -> simple elementwise kernel,
//  3) broadcast collapsible to a rowwise/colwise 2-D pattern -> 2-D path,
//  4) otherwise -> general N-D kernel, dispatched on the runtime rank.
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
    const int A_ndim,
    const int* A_dims,
    const int B_ndim,
    const int* B_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  const int ndim = std::max(A_ndim, B_ndim);
  std::vector<int> A_dims_array(ndim);
  std::vector<int> B_dims_array(ndim);
  std::vector<int> C_dims_array(ndim);
  // Pads A/B dims to `ndim` and computes the broadcast output shape.
  utils::ComputeBroadcastBinaryOpDims(
      A_ndim,
      A_dims,
      B_ndim,
      B_dims,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data());
  if (A_dims_array == B_dims_array) {
    // No broadcasting needed: plain elementwise kernel.
    const int size = std::accumulate(
        C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
    SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>
        <<<CAFFE_GET_BLOCKS(size),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context->cuda_stream()>>>(size, op, A, B, C);
    return;
  }
  int rows;
  int cols;
  bool broadcast_1st;
  if (utils::IsRowwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, true, broadcast_1st, op, A, B, C, context);
    return;
  }
  if (utils::IsColwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, false, broadcast_1st, op, A, B, C, context);
    return;
  }
  // General case: dispatch BroadcastBinaryOpImpl on the runtime rank.
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
      ndim,
      BroadcastBinaryOpImpl,
      TIn,
      TOut,
      BinaryOperator,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data(),
      op,
      A,
      B,
      C,
      context);
}
} // namespace
// Generates a CUDA kernel plus host wrapper Func<T, CUDAContext> that applies
// a unary operator `op` elementwise: Y[i] = op(X[i]).
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op)                   \
  __global__ void Func##CUDAKernel(const int N, const T* X, T* Y) {        \
    CUDA_1D_KERNEL_LOOP(i, N) {                                            \
      Y[i] = op(X[i]);                                                     \
    }                                                                      \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                            \
      const int N, const T* x, T* y, CUDAContext* context) {               \
    Func##CUDAKernel<<<                                                    \
        CAFFE_GET_BLOCKS(N),                                               \
        CAFFE_CUDA_NUM_THREADS,                                            \
        0,                                                                 \
        context->cuda_stream()>>>(N, x, y);                                \
  }
// Float transcendental / elementary functions.
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
// Cube / Not / Neg / Sign / Inv use project functors from math_utils.
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Cube,
    utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Cube,
    utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Neg,
    utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Neg,
    utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Sign,
    utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Sign,
    utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
// Generates SinCos<T, CUDAContext>: ys[i] = sin(x[i]), yc[i] = cos(x[i]),
// launching SinCosCUDAKernel on the context's stream.
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T)                            \
  template <>                                                        \
  CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>(                    \
      const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
    SinCosCUDAKernel<<<                                              \
        CAFFE_GET_BLOCKS(N),                                         \
        CAFFE_CUDA_NUM_THREADS,                                      \
        0,                                                           \
        context->cuda_stream()>>>(N, x, ys, yc);                     \
  }
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
// Generates the host wrapper Func<TIn, CUDAContext> launching the
// elementwise (no-broadcast) SimpleBinaryOpCUDAKernel with functor Op<TIn>.
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)   \
  template <>                                                       \
  CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>(                   \
      const int N,                                                  \
      const TIn* A,                                                 \
      const TIn* B,                                                 \
      TOut* C,                                                      \
      CUDAContext* context) {                                       \
    SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>>                    \
        <<<CAFFE_GET_BLOCKS(N),                                     \
           CAFFE_CUDA_NUM_THREADS,                                  \
           0,                                                       \
           context->cuda_stream()>>>(N, Op<TIn>(), A, B, C);        \
  }
// Comparison ops produce bool outputs for each numeric/bool input type.
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op)                 \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
// Arithmetic ops keep the input type (including float16 via the functors
// defined at the top of this file).
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op)                         \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op)               \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op)             \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op)                 \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)                 \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
  DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
    float,
    float,
    ElemwiseMax,
    thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
// Generates the four 2-D broadcast wrappers for Func:
// Rowwise##Func<.., true/false> and Colwise##Func<.., true/false>, where the
// bool template argument selects whether A (true) or B (false) is the
// broadcast vector. Each wrapper is a no-op for empty inputs and launches
// the corresponding Rowwise/Colwise kernel on the context's stream.
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)    \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>(           \
      const int rows,                                                      \
      const int cols,                                                      \
      const TIn* A,                                                        \
      const TIn* B,                                                        \
      TOut* C,                                                             \
      CUDAContext* context) {                                              \
    if (rows == 0 || cols == 0) {                                          \
      return;                                                              \
    }                                                                      \
    const int size = rows * cols;                                          \
    const FixedDivisor<int> cols_div(cols);                                \
    RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>                     \
        <<<CAFFE_GET_BLOCKS(size),                                         \
           CAFFE_CUDA_NUM_THREADS,                                         \
           0,                                                              \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);  \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>(          \
      const int rows,                                                      \
      const int cols,                                                      \
      const TIn* A,                                                        \
      const TIn* B,                                                        \
      TOut* C,                                                             \
      CUDAContext* context) {                                              \
    if (rows == 0 || cols == 0) {                                          \
      return;                                                              \
    }                                                                      \
    const int size = rows * cols;                                          \
    const FixedDivisor<int> cols_div(cols);                                \
    RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>                    \
        <<<CAFFE_GET_BLOCKS(size),                                         \
           CAFFE_CUDA_NUM_THREADS,                                         \
           0,                                                              \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);  \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>(           \
      const int rows,                                                      \
      const int cols,                                                      \
      const TIn* A,                                                        \
      const TIn* B,                                                        \
      TOut* C,                                                             \
      CUDAContext* context) {                                              \
    if (rows == 0 || cols == 0) {                                          \
      return;                                                              \
    }                                                                      \
    const int size = rows * cols;                                          \
    const FixedDivisor<int> cols_div(cols);                                \
    ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>                     \
        <<<CAFFE_GET_BLOCKS(size),                                         \
           CAFFE_CUDA_NUM_THREADS,                                         \
           0,                                                              \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);  \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>(          \
      const int rows,                                                      \
      const int cols,                                                      \
      const TIn* A,                                                        \
      const TIn* B,                                                        \
      TOut* C,                                                             \
      CUDAContext* context) {                                              \
    if (rows == 0 || cols == 0) {                                          \
      return;                                                              \
    }                                                                      \
    const int size = rows * cols;                                          \
    const FixedDivisor<int> cols_div(cols);                                \
    ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>                    \
        <<<CAFFE_GET_BLOCKS(size),                                         \
           CAFFE_CUDA_NUM_THREADS,                                         \
           0,                                                              \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);  \
  }
// Comparison ops (bool output) for all supported input types.
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
// Arithmetic ops (same-type output), including float16.
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)            \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int32_t, std::int32_t, Func, Op)                           \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int64_t, std::int64_t, Func, Op)                           \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op)  \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int32_t, std::int32_t, Func, Op)                        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
// Generates the general N-D broadcast wrapper Func<TIn, CUDAContext>, which
// forwards to the BroadcastBinaryOp dispatcher defined above.
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)  \
  template <>                                                         \
  CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>(                     \
      const int A_ndim,                                               \
      const int* A_dims,                                              \
      const int B_ndim,                                               \
      const int* B_dims,                                              \
      const TIn* A,                                                   \
      const TIn* B,                                                   \
      TOut* C,                                                        \
      CUDAContext* context) {                                         \
    BroadcastBinaryOp<TIn, TOut, Op<TIn>>(                            \
        A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
  }
// Comparison ops (bool output).
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
// Arithmetic ops (same-type output), including float16.
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)           \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                        \
      std::int32_t, std::int32_t, Func, Op)                       \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                        \
      std::int64_t, std::int64_t, Func, Op)                       \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op)  \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)  \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                       \
      std::int32_t, std::int32_t, Func, Op)                      \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
// Generates ReduceMin/ReduceMax wrappers around cub::DeviceReduce::Min/Max.
// First cub call (null temp storage) only queries the required temp-storage
// byte count; the scratch tensor is then resized (in elements of T, rounded
// up) and used as cub temp storage for the actual reduction on the context's
// stream.
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func)                   \
  template <>                                                            \
  CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>(                      \
      const int N,                                                       \
      const T* src,                                                      \
      T* dst,                                                            \
      Tensor* scratch_ptr,                                               \
      CUDAContext* context) {                                            \
    size_t memRequired = 0;                                              \
    cub::DeviceReduce::func(                                             \
        nullptr, memRequired, src, dst, N, context->cuda_stream());      \
    auto buffer_size =                                                   \
        static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));  \
    scratch_ptr->Resize(std::vector<TIndex>{buffer_size});               \
    cub::DeviceReduce::func(                                             \
        static_cast<void*>(scratch_ptr->mutable_data<T>()),              \
        memRequired,                                                     \
        src,                                                             \
        dst,                                                             \
        N,                                                               \
        context->cuda_stream());                                         \
  }
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {  // math_type is unused for fp32
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention. Computing the row-major C = alpha*op(A)*op(B) +
  // beta*C is done by evaluating C^T = op(B)^T * op(A)^T in column-major
  // form, i.e. by swapping the operands and the M/N dimensions.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  // alpha/beta are passed from host memory.
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemm(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      N));
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const float16* B,
    const float beta,
    float16* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (see the fp32 Gemm above for the operand swap).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    // fp16 storage, fp32 compute: cublasSgemmEx takes float alpha/beta and
    // CUDA_R_16F operand descriptors.
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        CUDA_R_16F,
        ldb,
        A,
        CUDA_R_16F,
        lda,
        &beta,
        C,
        CUDA_R_16F,
        N));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // fp16 storage and fp16 compute.
    // convert alpha, beta from float -> __half
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    // call cublasHgemm
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemm(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha_fp16,
        (const __half*)B,
        ldb,
        (const __half*)A,
        lda,
        &beta_fp16,
        (__half*)C,
        N));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
// Adds a per-channel bias over a CHW image as a rank-1 GEMM update:
// image (bias_channels x image_size) += bias (bias_channels x 1)
//                                       * bias_multiplier (1 x image_size).
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
    const float* bias,
    const float* bias_multiplier,
    const int bias_channels,
    const int image_size,
    float* image,
    CUDAContext* context) {
  const float kAlpha = 1.0f;
  const float kBeta = 1.0f;  // beta = 1: accumulate into the existing image
  Gemm<float, CUDAContext>(
      CblasNoTrans,
      CblasNoTrans,
      bias_channels,
      image_size,
      1,
      kAlpha,
      bias,
      bias_multiplier,
      kBeta,
      image,
      context);
}
// Batched fp32 GEMM over arrays of device pointers (one matrix per batch).
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float** A,
    const float** B,
    const float beta,
    float** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
  // Pre-CUDA-8 toolkits: fall back to one Gemm call per batch entry.
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float, CUDAContext>(
        trans_A,
        trans_B,
        M,
        N,
        K,
        alpha,
        A[i],
        B[i],
        beta,
        C[i],
        context,
        math_type);
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, see Gemm<float>).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  // cublasSgemmBatched needs the pointer arrays in device memory; these
  // device_vector constructions perform host->device copies on every call.
  thrust::device_vector<const float*> A_device(A, A + batch_size);
  thrust::device_vector<const float*> B_device(B, B + batch_size);
  thrust::device_vector<float*> C_device(C, C + batch_size);
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemmBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B_device.data().get(),
      ldb,
      A_device.data().get(),
      lda,
      &beta,
      C_device.data().get(),
      ldc,
      batch_size));
#endif
}
// Strided-batched fp32 GEMM: batch i uses A + i*A_stride, B + i*B_stride,
// C + i*C_stride.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
  // Pre-CUDA-8: one Gemm call per batch entry, advancing the pointers.
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float, CUDAContext>(
        trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
    A += A_stride;
    B += B_stride;
    C += C_stride;
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, see Gemm<float>).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemmStridedBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      B_stride,
      A,
      lda,
      A_stride,
      &beta,
      C,
      ldc,
      C_stride,
      batch_size));
#endif
}
// Batched fp16 GEMM over arrays of device pointers. math_type selects fp32
// compute (cublasGemmBatchedEx, CUDA >= 9.1) or fp16 compute
// (cublasHgemmBatched); pre-CUDA-9 toolkits loop over single Gemm calls.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16** A,
    const float16** B,
    const float beta,
    float16** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 9
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float16, CUDAContext>(
        trans_A,
        trans_B,
        M,
        N,
        K,
        alpha,
        A[i],
        B[i],
        beta,
        C[i],
        context,
        math_type);
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, see Gemm<float>).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010
    // cublasGemmBatchedEx requires CUDA >= 9.1; fall back to per-batch Gemm.
    // loop over matrices in the batch
    for (int i = 0; i < batch_size; ++i) {
      Gemm<float16, CUDAContext>(
          trans_A,
          trans_B,
          M,
          N,
          K,
          alpha,
          A[i],
          B[i],
          beta,
          C[i],
          context,
          math_type);
    }
#else
    // Pointer arrays must live in device memory for the batched call.
    thrust::device_vector<const void*> A_device(A, A + batch_size);
    thrust::device_vector<const void*> B_device(B, B + batch_size);
    thrust::device_vector<void*> C_device(C, C + batch_size);
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasGemmBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B_device.data().get(),
        CUDA_R_16F,
        ldb,
        A_device.data().get(),
        CUDA_R_16F,
        lda,
        &beta,
        C_device.data().get(),
        CUDA_R_16F,
        ldc,
        batch_size,
        CUDA_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    // Reinterpret the float16 pointer arrays as __half for cuBLAS, then
    // copy them to device memory.
    std::vector<const __half*> A_array(batch_size);
    std::vector<const __half*> B_array(batch_size);
    std::vector<__half*> C_array(batch_size);
    for (int i = 0; i < batch_size; ++i) {
      A_array[i] = reinterpret_cast<const __half*>(A[i]);
      B_array[i] = reinterpret_cast<const __half*>(B[i]);
      C_array[i] = reinterpret_cast<__half*>(C[i]);
    }
    thrust::device_vector<const __half*> A_device(
        A_array.cbegin(), A_array.cend());
    thrust::device_vector<const __half*> B_device(
        B_array.cbegin(), B_array.cend());
    thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemmBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha_fp16,
        B_device.data().get(),
        ldb,
        A_device.data().get(),
        lda,
        &beta_fp16,
        C_device.data().get(),
        ldc,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
#endif
}
// Strided-batched fp16 GEMM. math_type selects fp32 compute
// (cublasGemmStridedBatchedEx, CUDA >= 9.1) or fp16 compute
// (cublasHgemmStridedBatched); older toolkits loop over single Gemm calls.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const int A_stride,
    const float16* B,
    const int B_stride,
    const float beta,
    float16* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float16, CUDAContext>(
        trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
    A += A_stride;
    B += B_stride;
    C += C_stride;
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, see Gemm<float>).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010
    // cublasGemmStridedBatchedEx requires CUDA >= 9.1; per-batch fallback.
    // loop over matrices in the batch
    for (int i = 0; i < batch_size; ++i) {
      Gemm<float16, CUDAContext>(
          trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
      A += A_stride;
      B += B_stride;
      C += C_stride;
    }
#else
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        CUDA_R_16F,
        ldb,
        B_stride,
        A,
        CUDA_R_16F,
        lda,
        A_stride,
        &beta,
        C,
        CUDA_R_16F,
        ldc,
        C_stride,
        batch_size,
        CUDA_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemmStridedBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha_fp16,
        (const __half*)B,
        ldb,
        B_stride,
        (const __half*)A,
        lda,
        A_stride,
        &beta_fp16,
        (__half*)C,
        ldc,
        C_stride,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
#endif
}
#if CUDA_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
// TensorCore engine specialization for fp32 GEMM. There is no separate
// tensor-core path for this case, so it simply forwards to the default
// CUDA engine implementation.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  Gemm<float, CUDAContext>(
      trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
// TensorCore engine specialization for fp16 GEMM: temporarily enables
// CUBLAS_TENSOR_OP_MATH on the handle, runs cublasGemmEx with fp16 operands
// and fp32 compute, then restores the default math mode.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float16, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const float16* B,
    const float beta,
    float16* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {  // math_type is unused on this path
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, see Gemm<float>).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  // enable TensorCore for this call on this handle
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
  }
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasGemmEx(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      CUDA_R_16F,
      ldb,
      A,
      CUDA_R_16F,
      lda,
      &beta,
      C,
      CUDA_R_16F,
      N,
      CUDA_R_32F,
      CUBLAS_GEMM_DFALT_TENSOR_OP));
  // Now disable TensorCore math for subsequent calls to this handle
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
  }
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // FP32 strided-batched GEMM has no TensorCore-specific implementation;
  // delegate directly to the default engine.
  GemmStridedBatched<float, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride,
      beta, C, C_stride, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float16, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float16* A,
    const int A_stride,
    const float16* B,
    const int B_stride,
    const float beta,
    float16* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // The default-engine FP16 strided-batched GEMM already selects the best
  // available path, so simply defer to it.
  GemmStridedBatched<float16, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride,
      beta, C, C_stride, context, math_type);
}
#endif // CUDA_VERSION >= 9000
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int lda,
    const float* B,
    const int ldb,
    const float beta,
    float* C,
    const int ldc,
    CUDAContext* context) {
  // cuBLAS is column-major while this API is row-major, so we compute
  // C^T = B^T * A^T: operands and the M/N extents are swapped in the call.
  const cublasOperation_t op_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t op_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemm(
      context->cublas_handle(),
      op_B,
      op_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      ldc));
}
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float* A,
    const float* x,
    const float beta,
    float* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // A row-major M x N matrix looks like an N x M column-major matrix to
  // cuBLAS, so the requested op is inverted: NoTrans -> OP_T, Trans -> OP_N.
  const cublasOperation_t op =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemv(
      context->cublas_handle(), op, N, M, &alpha, A, N, x, 1, &beta, y, 1));
}
// Batched Add variants
namespace {
// Accumulates `batch` strided slices of `first` into Y:
//   Y[i] += sum_b first[b * stripe + i]  for i in [0, N),
// with the accumulation carried out in float regardless of T.
template <typename T>
__global__ void AddStripedBatchKernel(
    const int N,
    const T* first,
    T* Y,
    const int stripe,
    const int batch) {
  for (int b = 0; b < batch; ++b) {
    const T* src = first + b * stripe;
    CUDA_1D_KERNEL_LOOP(i, N) {
      float acc = convert::To<T, float>(Y[i]);
      acc += convert::To<T, float>(src[i]);
      Y[i] = convert::To<float, T>(acc);
    }
  }
}
} // namespace
// AddStripedBatch<T>(N, first, Y, stripe, batch, context): launches
// AddStripedBatchKernel on the context's stream to accumulate `batch` slices
// of length N, spaced `stripe` elements apart starting at `first`, into Y.
// (Comments must stay outside the macro: a // inside a \-continued macro
// would swallow the continuation.)
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
  template <>                                                  \
  CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>(     \
      const int N,                                             \
      const T* first,                                          \
      T* Y,                                                    \
      const int stripe,                                        \
      const int batch,                                         \
      CUDAContext* context) {                                  \
    AddStripedBatchKernel<T>                                   \
        <<<CAFFE_GET_BLOCKS(N),                                \
           CAFFE_CUDA_NUM_THREADS,                             \
           0,                                                  \
           context->cuda_stream()>>>(N, first, Y, stripe, batch); \
  }
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
// FP16 GEMV, emulated as a GEMM with a single right-hand-side column since
// cuBLAS provides no half-precision gemv. math_type chooses FP32 arithmetic
// (cublasSgemmEx) or pure FP16 arithmetic (cublasHgemm); any other value
// throws.
template <>
CAFFE2_CUDA_EXPORT void Gemv<float16, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float16* A,
    const float16* x,
    const float beta,
    float16* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Row-major A seen through column-major cuBLAS is implicitly transposed,
  // so the requested op is inverted here.
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  // sort out what we need to call cublasSgemmEx / cublasHgemm
  const int m = (cu_trans_A == CUBLAS_OP_N) ? N : M;
  const int k = (cu_trans_A == CUBLAS_OP_N) ? M : N;
  const int lda = (cu_trans_A == CUBLAS_OP_N) ? m : k;
  const int ldc = m;
  if (math_type == TensorProto_DataType_FLOAT) {
    // FP16 inputs/outputs, FP32 compute.
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_A,
        CUBLAS_OP_N,
        m,
        1,
        k,
        &alpha,
        A,
        CUDA_R_16F,
        lda,
        x,
        CUDA_R_16F,
        k,
        &beta,
        y,
        CUDA_R_16F,
        ldc));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Pure FP16 path: scalars must also be converted to half.
    const __half alpha_fp16 = convert::floatToHalf(alpha);
    const __half beta_fp16 = convert::floatToHalf(beta);
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemm(
        context->cublas_handle(),
        cu_trans_A,
        CUBLAS_OP_N,
        m,
        1,
        k,
        &alpha_fp16,
        (const __half*)A,
        lda,
        (const __half*)x,
        k,
        &beta_fp16,
        (__half*)y,
        ldc));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
namespace {
// Fills Y[0..N) with the scalar value alpha.
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = alpha;
  }
}
} // namespace
// Set<T>(N, alpha, Y, context): fills Y with alpha on the context's stream.
// A zero alpha takes the cudaMemsetAsync fast path (byte-wise zeroing is
// valid for all these element types); anything else launches SetKernel.
// N == 0 is a no-op. (Comments stay outside the macro: // inside a
// \-continued macro would swallow the continuation.)
#define CAFFE2_SPECIALIZED_CUDA_SET(T)                             \
  template <>                                                      \
  CAFFE2_CUDA_API void Set<T, CUDAContext>(                        \
      const size_t N, const T alpha, T* Y, CUDAContext* context) { \
    if (N == 0) {                                                  \
      return;                                                      \
    }                                                              \
    if (alpha == T(0)) {                                           \
      cudaMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
    } else {                                                       \
      SetKernel<T>                                                 \
          <<<CAFFE_GET_BLOCKS(N),                                  \
             CAFFE_CUDA_NUM_THREADS,                               \
             0,                                                    \
             context->cuda_stream()>>>(N, alpha, Y);               \
    }                                                              \
  }
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
template <>
CAFFE2_CUDA_EXPORT void Set<float16, CUDAContext>(
    const size_t N,
    const float16 alpha,
    float16* Y,
    CUDAContext* context) {
  // Unlike the other Set specializations, no memset fast path is taken for
  // float16: the fill kernel is always launched for non-empty N.
  if (N == 0) {
    return;
  }
  SetKernel<float16>
      <<<CAFFE_GET_BLOCKS(N),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(N, alpha, Y);
}
namespace {
// Rescales uniform samples in place from the generator's unit range into
// (min, max), converting through float so half types work too.
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
  float scale = max - min;
  CUDA_1D_KERNEL_LOOP(i, N) {
    x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
  }
}
// Folds raw 32-bit random words into the integer range [min, max] in place
// via modulo. NOTE(review): the modulo slightly biases the distribution when
// (max - min + 1) does not divide 2^32, and `range` can overflow int for
// extreme bounds (e.g. min = INT_MIN, max = INT_MAX) — confirm callers only
// pass moderate ranges.
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
  int* x_int = reinterpret_cast<int*>(x);
  int range = (max - min + 1);
  CUDA_1D_KERNEL_LOOP(i, N) {
    x_int[i] = min + static_cast<int>(x[i] % range);
  }
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
    const size_t n,
    const float min,
    const float max,
    float* r,
    CUDAContext* context) {
  // Draw n unit-interval uniforms, then shift/scale them into (min, max).
  CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
  UniformShift<float>
      <<<CAFFE_GET_BLOCKS(n),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
    const size_t n,
    const double min,
    const double max,
    double* r,
    CUDAContext* context) {
  // Same as the float overload, but with double-precision generation.
  CURAND_ENFORCE(
      curandGenerateUniformDouble(context->curand_generator(), r, n));
  UniformShift<double>
      <<<CAFFE_GET_BLOCKS(n),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
    const size_t n,
    const int min,
    const int max,
    int* r,
    CUDAContext* context) {
  // Fill r with raw 32-bit words, then fold them into [min, max] in place.
  unsigned int* const raw = reinterpret_cast<unsigned int*>(r);
  CURAND_ENFORCE(curandGenerate(context->curand_generator(), raw, n));
  UniformIntFit<<<
      CAFFE_GET_BLOCKS(n),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context->cuda_stream()>>>(n, min, max, raw);
}
// curandGenerateNormal requires an even sample count. For odd n this helper
// fills the final element r[n - 1] with a host-generated normal sample and
// returns n - 1 (the even count left for curand); for even n it returns n
// unchanged.
template <typename T>
size_t HandleOddLengthRandGaussian(
    const size_t n,
    const T mean,
    const T std,
    T* r,
    CUDAContext* context) {
  if (n % 2 == 1) {
    // Bug fix: the engine used to be default-constructed on every call, so
    // the trailing element was the *same* value on every invocation. Keep a
    // single engine alive across calls, seeded once from random_device.
    static std::default_random_engine generator(std::random_device{}());
    std::normal_distribution<T> distribution(mean, std);
    const T random_value = distribution(generator);
    Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
    return n - 1;
  }
  return n;
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
    const size_t n,
    const float mean,
    const float std,
    float* r,
    CUDAContext* context) {
  // curandGenerateNormal only accepts even counts. For odd n the helper
  // fills the trailing element itself and hands back n - 1 for curand.
  const size_t even_n =
      HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
  CURAND_ENFORCE(
      curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
    const size_t n,
    const double mean,
    const double std,
    double* r,
    CUDAContext* context) {
  // Double-precision analogue of the float overload above.
  const size_t even_n =
      HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
  CURAND_ENFORCE(curandGenerateNormalDouble(
      context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
    const int n,
    const float* a,
    const float* b,
    float* y,
    CUDAContext* context) {
  // The result pointer y is a device pointer, hence DEVICE pointer mode.
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float16, CUDAContext>(
    const int n,
    const float16* a,
    const float16* b,
    float16* y,
    CUDAContext* context) {
  // FP16 dot product with FP32 execution type; y is a device pointer.
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasDotEx(
      context->cublas_handle(),
      n,
      a, CUDA_R_16F, 1,
      b, CUDA_R_16F, 1,
      y, CUDA_R_16F,
      CUDA_R_32F));
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
// Single-block reduction of X[0..N) into *Y: plain sum, or sum of squares
// when `square` is true. Accumulation happens in float regardless of T.
// Launch contract (see the call sites in this file): exactly one block of
// SUM_KERNEL_NTHREADS threads.
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
  const int idx = threadIdx.x;
  __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
  reduction_buffer[idx] = 0;
  // A multilevel reduction.
  // N -> 128: each thread accumulates a strided slice of X.
  if (!square) {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      reduction_buffer[idx] += convert::To<T, float>(X[i]);
    }
  } else {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      float Xi = convert::To<T, float>(X[i]);
      reduction_buffer[idx] += Xi * Xi;
    }
  }
  __syncthreads();
  // 128 -> 32: fold the upper three quarters of the buffer into the first.
  if (idx < 32) {
    reduction_buffer[idx] += reduction_buffer[idx + 32] +
        reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
  }
  __syncthreads();
  // 32 -> 1: thread 0 serially sums the remaining partials and writes Y.
  if (idx == 0) {
    float tmp = 0;
    for (int i = 0; i < 32; ++i) {
      tmp += reduction_buffer[i];
    }
    *Y = convert::To<float, T>(tmp);
  }
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
// Converts the float accumulator *sum to T and stores it in *dest on-device.
// Used to move a float reduction result back into e.g. a float16 output.
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
  *dest = convert::To<float, T>(*sum);
}
// Runs cub::DeviceReduce::Sum over N elements of iterator `it` using
// `scratch_ptr` for temporary storage. If `dest` is null, one extra T is
// appended to the scratch tensor and `dest` is redirected to it (so the
// result lives inside the scratch tensor); otherwise `dest` is used as-is.
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
    const int N,
    IterT it,
    T*& dest,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // First call with a null buffer only queries the required scratch bytes.
  size_t memRequired = 0;
  cub::DeviceReduce::Sum(
      nullptr, memRequired, it, dest, N, context->cuda_stream());
  // Round the byte requirement up to a whole number of T elements.
  auto buffer_size =
      static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
  if (!dest) {
    // allocate one more T at the end of scratch for dest
    scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
    dest = scratch_ptr->template mutable_data<T>() + buffer_size;
  } else {
    scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
  }
  cub::DeviceReduce::Sum(
      static_cast<void*>(scratch_ptr->template mutable_data<T>()),
      memRequired,
      it,
      dest,
      N,
      context->cuda_stream());
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // Large inputs use cub's device reduce (which needs scratch space); small
  // inputs use the single-block SumKernel, which wins below the threshold.
  const bool use_device_reduce =
      scratch_ptr != nullptr && N > DEVICE_REDUCE_SIZE_THRESHOLD;
  if (use_device_reduce) {
    SumGenericIter<float>(N, x, y, context, scratch_ptr);
  } else {
    SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
        N, x, y, false);
  }
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
    const int N,
    const int32_t* x,
    int32_t* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // Same dispatch as Sum<float>: cub device reduce when scratch is provided
  // and N is large enough, single-block kernel otherwise.
  const bool use_device_reduce =
      scratch_ptr != nullptr && N > DEVICE_REDUCE_SIZE_THRESHOLD;
  if (use_device_reduce) {
    SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
  } else {
    SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
        N, x, y, false);
  }
}
namespace {
// Unary functor mapping a T to float via convert::To; used as the transform
// of a cub::TransformInputIterator so reductions accumulate in float.
template <typename T>
struct FloatTransform {
  inline __host__ __device__ float operator()(const T v) const {
    return convert::To<T, float>(v);
  }
};
} // namespace
// Sum<T> for types without a native cub sum (currently float16): reduces
// through a float-converting iterator into a float scratch slot, then
// converts the result back to T with SumConvertKernel. Falls back to the
// single-block SumKernel for small N or missing scratch. (Comments stay
// outside the macro: // inside a \-continued macro would swallow the
// continuation.)
#define CAFFE2_MATH_SUM_FUNC(T)                                           \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>(                            \
      const int N,                                                        \
      const T* x,                                                         \
      T* y,                                                               \
      CUDAContext* context,                                               \
      Tensor* scratch_ptr) {                                              \
    if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {                \
      FloatTransform<T> transform;                                        \
      cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
          x, transform);                                                  \
      float* sum = nullptr;                                               \
      SumGenericIter<float>(N, it, sum, context, scratch_ptr);            \
      SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y);      \
    } else {                                                              \
      SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(   \
          N, x, y, false);                                                \
    }                                                                     \
  }
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
// Unary functor squaring its argument; composed with FloatTransform to build
// sum-of-squares reductions out of cub's plain Sum.
template <typename T>
struct SqrTransform {
  inline __host__ __device__ T operator()(const T v) const {
    return v * v;
  }
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // Sum of squares: feed cub a squaring transform iterator for large N,
  // otherwise reuse SumKernel with its `square` flag set.
  const bool use_device_reduce =
      scratch_ptr != nullptr && N > DEVICE_REDUCE_SIZE_THRESHOLD;
  if (use_device_reduce) {
    SqrTransform<float> transform;
    cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
        x, transform);
    SumGenericIter<float>(N, it, y, context, scratch_ptr);
  } else {
    SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
        N, x, y, true);
  }
}
// SumSqr<T> for types without native cub support (currently float16): the
// input is first converted to float, then squared, via two stacked
// TransformInputIterators; the float result is converted back to T with
// SumConvertKernel. Small N / no scratch falls back to SumKernel with
// square=true. (Comments stay outside the macro body.)
#define CAFFE2_MATH_SUMSQR_FUNC(T)                                      \
  template <>                                                           \
  CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>(                       \
      const int N,                                                      \
      const T* x,                                                       \
      T* y,                                                             \
      CUDAContext* context,                                             \
      Tensor* scratch_ptr) {                                            \
    if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {              \
      FloatTransform<T> float_transform;                                \
      cub::TransformInputIterator<float, FloatTransform<T>, const T*>   \
          float_it(x, float_transform);                                 \
      SqrTransform<float> sqr_transform;                                \
      cub::TransformInputIterator<                                      \
          float,                                                        \
          SqrTransform<float>,                                          \
          decltype(float_it)>                                           \
          it(float_it, sqr_transform);                                  \
      float* sum = nullptr;                                             \
      SumGenericIter<float>(N, it, sum, context, scratch_ptr);          \
      SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y);    \
    } else {                                                            \
      SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
          N, x, y, true);                                               \
    }                                                                   \
  }
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
// Row-wise gather: y[i] = x[i * D + idx[i]], i.e. from each row i of the
// N x D matrix x, pick the column given by idx[i].
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = x[i * D + idx[i]];
  }
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
    const int N,
    const int D,
    const float* x,
    const int* idx,
    float* y,
    CUDAContext* context) {
  // Gather one element per row: y[i] = x[i * D + idx[i]].
  SelectKernel<float>
      <<<CAFFE_GET_BLOCKS(N),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Select<float16, CUDAContext>(
    const int N,
    const int D,
    const float16* x,
    const int* idx,
    float16* y,
    CUDAContext* context) {
  // FP16 variant of Select: gather y[i] = x[i * D + idx[i]].
  SelectKernel<float16>
      <<<CAFFE_GET_BLOCKS(N),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
// y[i] = x[i] * alpha with a host-side scalar alpha; __ldg is used on newer
// architectures since x is read-only here.
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) {
  CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * static_cast<TData>(alpha);
#else
    y[i] = x[i] * static_cast<TData>(alpha);
#endif
  }
}
// Same as above but alpha is read from device memory (pointer overload).
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) {
  CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha));
#else
    y[i] = x[i] * static_cast<TData>(*alpha);
#endif
  }
}
// y[i] = x[i] ^ exponent. NOTE(review): uses single-precision powf for any
// T — fine for the float instantiation used in this file; confirm before
// instantiating with double.
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    y[i] = powf(x[i], exponent);
  }
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>(
    const int N,
    const float* a,
    const float b,
    float* y,
    CUDAContext* context) {
  // Elementwise power: y[i] = powf(a[i], b).
  PowKernel<<<
      CAFFE_GET_BLOCKS(N),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context->cuda_stream()>>>(N, a, b, y);
}
// Scale<TAlpha, TData>(N, alpha, x, y, context) via cuBLAS *scal: copies x
// to y if they differ, then scales y in place. The scalar overload skips the
// scal call entirely when alpha == 1; the pointer overload cannot inspect
// alpha (it lives on the device) and always calls scal with DEVICE pointer
// mode. (Comments stay outside the macro: // inside a \-continued macro
// would swallow the continuation.)
#define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc)            \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                 \
      const int N,                                                           \
      const TAlpha alpha,                                                    \
      const TData* x,                                                        \
      TData* y,                                                              \
      CUDAContext* context) {                                                \
    if (N == 0) {                                                            \
      return;                                                                \
    }                                                                        \
    if (x != y) {                                                            \
      cudaMemcpyAsync(                                                       \
          y,                                                                 \
          x,                                                                 \
          sizeof(TData) * N,                                                 \
          cudaMemcpyDeviceToDevice,                                          \
          context->cuda_stream());                                           \
    }                                                                        \
    if (alpha != TAlpha(1)) {                                                \
      CUBLAS_ENFORCE(cublasSetPointerMode(                                   \
          context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));              \
      CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \
    }                                                                        \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                 \
      const int N,                                                           \
      const TAlpha* alpha,                                                   \
      const TData* x,                                                        \
      TData* y,                                                              \
      CUDAContext* context) {                                                \
    if (N == 0) {                                                            \
      return;                                                                \
    }                                                                        \
    if (x != y) {                                                            \
      cudaMemcpyAsync(                                                       \
          y,                                                                 \
          x,                                                                 \
          sizeof(TData) * N,                                                 \
          cudaMemcpyDeviceToDevice,                                          \
          context->cuda_stream());                                           \
    }                                                                        \
    CUBLAS_ENFORCE(cublasSetPointerMode(                                     \
        context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));              \
    CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1));    \
  }
DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, cublasSscal)
DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, cublasDscal)
#undef DELEGATE_CUBLAS_SCALE_FUNCTION
// Scale for integer types (no cuBLAS scal available): launches
// ScaleCUDAKernel instead. The scalar overload short-circuits alpha == 1 to
// a plain async copy; the pointer overload always launches the kernel since
// alpha lives on the device. (Comments stay outside the macro body.)
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData)         \
  template <>                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
      const int N,                                           \
      const TAlpha alpha,                                    \
      const TData* x,                                        \
      TData* y,                                              \
      CUDAContext* context) {                                \
    if (N == 0) {                                            \
      return;                                                \
    }                                                        \
    if (alpha == TAlpha(1)) {                                \
      if (x != y) {                                          \
        cudaMemcpyAsync(                                     \
            y,                                               \
            x,                                               \
            sizeof(TData) * N,                               \
            cudaMemcpyDeviceToDevice,                        \
            context->cuda_stream());                         \
      }                                                      \
      return;                                                \
    }                                                        \
    ScaleCUDAKernel<TAlpha, TData>                           \
        <<<CAFFE_GET_BLOCKS(N),                              \
           CAFFE_CUDA_NUM_THREADS,                           \
           0,                                                \
           context->cuda_stream()>>>(N, alpha, x, y);        \
  }                                                          \
  template <>                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
      const int N,                                           \
      const TAlpha* alpha,                                   \
      const TData* x,                                        \
      TData* y,                                              \
      CUDAContext* context) {                                \
    if (N == 0) {                                            \
      return;                                                \
    }                                                        \
    ScaleCUDAKernel<TAlpha, TData>                           \
        <<<CAFFE_GET_BLOCKS(N),                              \
           CAFFE_CUDA_NUM_THREADS,                           \
           0,                                                \
           context->cuda_stream()>>>(N, alpha, x, y);        \
  }
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
template <>
CAFFE2_CUDA_EXPORT void Scale<float16, float16, CUDAContext>(
    const int N,
    const float16 alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  // Copy x into y if they differ, then scale y in place with cublasScalEx
  // (fp16 data/alpha, fp32 execution). Unlike the float/double paths, no
  // alpha == 1 shortcut is taken here.
  if (N == 0) {
    return;
  }
  if (x != y) {
    cudaMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        cudaMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasScalEx(
      context->cublas_handle(), N, &alpha, CUDA_R_16F, y, CUDA_R_16F, 1,
      CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float16, float16, CUDAContext>(
    const int N,
    const float16* alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  // Device-resident alpha: copy x into y if needed, then scale in place with
  // cublasScalEx in DEVICE pointer mode (fp16 data/alpha, fp32 execution).
  if (N == 0) {
    return;
  }
  if (x != y) {
    cudaMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        cudaMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasScalEx(
      context->cublas_handle(), N, alpha, CUDA_R_16F, y, CUDA_R_16F, 1,
      CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, float16, CUDAContext>(
    const int N,
    const float alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  // Float alpha, fp16 data: copy x into y if needed; skip the scal call
  // entirely when alpha is exactly 1.
  if (N == 0) {
    return;
  }
  if (x != y) {
    cudaMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        cudaMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  if (alpha == 1.0f) {
    return;
  }
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasScalEx(
      context->cublas_handle(), N, &alpha, CUDA_R_32F, y, CUDA_R_16F, 1,
      CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, float16, CUDAContext>(
    const int N,
    const float* alpha,
    const float16* x,
    float16* y,
    CUDAContext* context) {
  // Device-resident float alpha, fp16 data: copy then scale in place with
  // cublasScalEx in DEVICE pointer mode.
  if (N == 0) {
    return;
  }
  if (x != y) {
    cudaMemcpyAsync(
        y,
        x,
        sizeof(float16) * N,
        cudaMemcpyDeviceToDevice,
        context->cuda_stream());
  }
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasScalEx(
      context->cublas_handle(), N, alpha, CUDA_R_32F, y, CUDA_R_16F, 1,
      CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
    const int N,
    const float alpha,
    const float* X,
    float* Y,
    CUDAContext* context) {
  // Y += alpha * X via cublasSaxpy; alpha is a host scalar.
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
    const int N,
    const float alpha,
    const double* X,
    double* Y,
    CUDAContext* context) {
  // The interface takes a float alpha; widen it for cublasDaxpy.
  const double alpha_d{alpha};
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(
      cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float16, CUDAContext>(
    const int N,
    const float alpha,
    const float16* X,
    float16* Y,
    CUDAContext* context) {
  // Y += alpha * X on fp16 data with fp32 alpha and fp32 execution type.
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasAxpyEx(
      context->cublas_handle(),
      N,
      &alpha, CUDA_R_32F,
      X, CUDA_R_16F, 1,
      Y, CUDA_R_16F, 1,
      CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
    const int N,
    const float* alpha,
    const float* X,
    float* Y,
    CUDAContext* context) {
  // Device-resident alpha: same saxpy, but in DEVICE pointer mode.
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float16, CUDAContext>(
    const int N,
    const float* alpha,
    const float16* X,
    float16* Y,
    CUDAContext* context) {
  // Device-resident fp32 alpha over fp16 data, fp32 execution type.
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasAxpyEx(
      context->cublas_handle(),
      N,
      alpha, CUDA_R_32F,
      X, CUDA_R_16F, 1,
      Y, CUDA_R_16F, 1,
      CUDA_R_32F));
}
namespace {
// y[i] = a * x[i] + b * y[i] with host scalars a, b.
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
    const int N,
    const TCoeff a,
    const TData* x,
    const TCoeff b,
    TData* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * a + y[i] * b;
#else
    y[i] = x[i] * a + y[i] * b;
#endif
  }
}
// float16 specialization: arithmetic is done in float, then converted back.
template <>
__global__ void AxpbyCUDAKernel<float, float16>(
    const int N,
    const float a,
    const float16* x,
    const float b,
    float16* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = convert::To<float, float16>(
        convert::To<float16, float>(x[i]) * a +
        convert::To<float16, float>(y[i]) * b);
  }
}
// y[i] = (*a) * x[i] + (*b) * y[i] with device-resident scalars a, b.
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
    const int N,
    const TCoeff* a,
    const TData* x,
    const TCoeff* b,
    TData* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
    y[i] = x[i] * *a + y[i] * *b;
#endif
  }
}
// float16 + device-scalar specialization, again accumulating in float.
template <>
__global__ void AxpbyCUDAKernel<float, float16>(
    const int N,
    const float* a,
    const float16* x,
    const float* b,
    float16* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    y[i] = convert::To<float, float16>(
        convert::To<float16, float>(x[i]) * __ldg(a) +
        convert::To<float16, float>(y[i]) * __ldg(b));
#else
    y[i] = convert::To<float, float16>(
        convert::To<float16, float>(x[i]) * *a +
        convert::To<float16, float>(y[i]) * *b);
#endif
  }
}
} // namespace
// Axpby<TCoeff, TData>(n, a, x, b, y, context): y = a*x + b*y, with one
// overload for host scalars and one for device-resident scalar pointers.
// Both just launch the matching AxpbyCUDAKernel overload on the context's
// stream. (Comments stay outside the macro: // inside a \-continued macro
// would swallow the continuation.)
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData)              \
  template <>                                                     \
  CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>(      \
      const int n,                                                \
      const TCoeff a,                                             \
      const TData* x,                                             \
      const TCoeff b,                                             \
      TData* y,                                                   \
      CUDAContext* context) {                                     \
    AxpbyCUDAKernel<TCoeff, TData>                                \
        <<<CAFFE_GET_BLOCKS(n),                                   \
           CAFFE_CUDA_NUM_THREADS,                                \
           0,                                                     \
           context->cuda_stream()>>>(n, a, x, b, y);              \
  }                                                               \
  template <>                                                     \
  CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>(      \
      const int n,                                                \
      const TCoeff* a,                                            \
      const TData* x,                                             \
      const TCoeff* b,                                            \
      TData* y,                                                   \
      CUDAContext* context) {                                     \
    AxpbyCUDAKernel<TCoeff, TData>                                \
        <<<CAFFE_GET_BLOCKS(n),                                   \
           CAFFE_CUDA_NUM_THREADS,                                \
           0,                                                     \
           context->cuda_stream()>>>(n, a, x, b, y);              \
  }
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float16)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
// im2col for NCHW layout. Each of the n threads handles one (channel,
// output-row, output-col) triple and writes that location's kernel_h *
// kernel_w patch column into col_data; taps falling into padding are
// written as 0.
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (channel_in, h_out, w_out).
    const int w_out = index % output_w;
    const int h_index = index / output_w;
    const int h_out = h_index % output_h;
    const int channel_in = h_index / output_h;
    const int channel_out = channel_in * kernel_h * kernel_w;
    // Top-left corner of the receptive field (may be negative in padding).
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    const int output_size = output_h * output_w;
    T* col_data_ptr =
        col_data + (channel_out * output_h + h_out) * output_w + w_out;
    const T* img_data_ptr =
        img_data + (channel_in * input_h + h_in) * input_w + w_in;
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data_ptr + dh * input_w + dw)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data_ptr[dh * input_w + dw]
            : 0;
#endif
        // Each kernel tap occupies its own output_h*output_w plane.
        col_data_ptr += output_size;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// im2col for NHWC layout. Each of the n threads handles one (output-row,
// output-col, channel) triple; consecutive threads cover consecutive
// channels, which keeps global writes contiguous. Padding taps become 0.
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_w,
    const int channels,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (h_out, w_out, channel_in).
    const int channel_in = index % channels;
    const int w_out = index / channels % output_w;
    const int h_out = index / channels / output_w;
    // Top-left corner of the receptive field (may be negative in padding).
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    T* col_data_ptr = col_data +
        (h_out * output_w + w_out) * channels * kernel_h * kernel_w +
        channel_in;
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data + (h * input_w + w) * channels + channel_in)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data[(h * input_w + w) * channels + channel_in]
            : 0;
#endif
        // Successive kernel taps of one location are `channels` apart.
        col_data_ptr += channels;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// col2im for NCHW layout: the inverse (adjoint) of im2col. Each of the n
// threads owns one image pixel and sums every column entry that the pixel
// contributed to, so no atomics are needed.
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Effective (dilated) patch extents.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    // Pixel coordinates in padded space.
    const int w = index % input_w + pad_l;
    const int h = index / input_w % input_h + pad_t;
    const int c = index / (input_h * input_w);
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = (h - h_col * stride_h);
        int w_k = (w - w_col * stride_w);
        // Only taps aligned with the dilation grid contribute.
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int col_data_index =
              (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
                  output_w +
              w_col;
#if __CUDA_ARCH__ >= 350
          val += __ldg(col_data + col_data_index);
#else
          val += col_data[col_data_index];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// col2im for NHWC layout: per-pixel gather of all contributing column
// entries, mirroring Col2ImNCHWCUDAKernel but with channel-innermost
// indexing. No atomics needed since each thread owns one output pixel.
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
    const int n,
    const int input_w,
    const int channels,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Effective (dilated) patch extents.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    // Pixel coordinates in padded space, channel-innermost.
    const int c = index % channels;
    const int w = index / channels % input_w + pad_l;
    const int h = index / channels / input_w + pad_t;
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    const int channels_col = patch_h * patch_w * channels;
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = h - h_col * stride_h;
        int w_k = w - w_col * stride_w;
        // Only taps aligned with the dilation grid contribute.
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
          val += __ldg(
              col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
          val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// N-dimensional im2col/col2im for NCHW layout, selected by kCol2Im:
// false -> gather (Y = col buffer, X = image), true -> scatter-add
// (Y = image, X = col buffer) via atomicAdd. For the col2im direction the
// image buffer is presumably zeroed by the caller before launch — confirm
// at the call sites. One block per outer (kernel-offset) index, threads
// striding over the inner (spatial output) indices.
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
    const int outer_size,
    const int inner_size,
    const int kernel_size,
    SimpleArray<int, N + 1> img_shape,
    SimpleArray<int, N + 1> col_shape,
    SimpleArray<int, N> kernel_shape,
    SimpleArray<int, N> stride,
    SimpleArray<int, N> dilation,
    SimpleArray<int, N> pad,
    const T* X_data,
    T* Y_data) {
  int d_offset[N];
  int d_iter[N];
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    // Decompose the outer index into per-dimension kernel offsets.
    int offset_i = i;
#pragma unroll
    for (int d_i = N - 1; d_i >= 0; --d_i) {
      d_offset[d_i] = offset_i % kernel_shape.data[d_i];
      offset_i /= kernel_shape.data[d_i];
    }
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      // Decompose the inner index into per-dimension output coordinates.
      int offset_j = j;
#pragma unroll
      for (int d_i = N - 1; d_i >= 0; --d_i) {
        d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
        offset_j /= col_shape.data[d_i + 1];
      }
      const int col_index = i * inner_size + j;
      int img_index = i / kernel_size;
      bool is_padding = false;
#pragma unroll
      for (int d_i = 0; d_i < N; ++d_i) {
        // Image coordinate hit by this (output position, kernel tap) pair.
        const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
            d_offset[d_i] * dilation.data[d_i];
        is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
        img_index = img_index * img_shape.data[d_i + 1] + d_img;
      }
#if __CUDA_ARCH__ >= 350
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
      } else if (!is_padding) {
        atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
      }
#else
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : X_data[img_index];
      } else if (!is_padding) {
        atomicAdd(Y_data + img_index, X_data[col_index]);
      }
#endif
    }
  }
}
// Host-side launcher for the rank-N Im2Col kernel (NCHW). Packs the raw
// shape/stride/dilation/pad pointers into by-value SimpleArray PODs so they
// can be passed to the kernel, then launches one block per outer element
// (capped at CAFFE_MAXIMUM_NUM_BLOCKS; the kernel grid-strides).
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context) {
  // Copy the descriptors into fixed-size arrays passable by value.
  SimpleArray<int, N + 1> img_dims;
  SimpleArray<int, N + 1> col_dims;
  SimpleArray<int, N> kernel_dims;
  SimpleArray<int, N> strides;
  SimpleArray<int, N> dilations;
  SimpleArray<int, N> pads;
  std::memcpy(img_dims.data, img_shape, sizeof(int) * (N + 1));
  std::memcpy(col_dims.data, col_shape, sizeof(int) * (N + 1));
  std::memcpy(kernel_dims.data, kernel_shape, sizeof(int) * N);
  std::memcpy(strides.data, stride, sizeof(int) * N);
  std::memcpy(dilations.data, dilation, sizeof(int) * N);
  std::memcpy(pads.data, pad, sizeof(int) * N);
  // Outer axis is the leading column-buffer dimension; the rest is inner.
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  Im2ColNdNCHWCUDAKernel<T, N, false>
      <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          outer_size,
          inner_size,
          kernel_size,
          img_dims,
          col_dims,
          kernel_dims,
          strides,
          dilations,
          pads,
          img_data,
          col_data);
}
// Host-side launcher for the rank-N Col2Im kernel (NCHW). Mirrors
// Im2ColNdNCHWCUDAImpl but zero-fills the image buffer first, because the
// Col2Im kernel accumulates into it with atomicAdd.
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context) {
  // Copy the descriptors into fixed-size arrays passable by value.
  SimpleArray<int, N + 1> img_dims;
  SimpleArray<int, N + 1> col_dims;
  SimpleArray<int, N> kernel_dims;
  SimpleArray<int, N> strides;
  SimpleArray<int, N> dilations;
  SimpleArray<int, N> pads;
  std::memcpy(img_dims.data, img_shape, sizeof(int) * (N + 1));
  std::memcpy(col_dims.data, col_shape, sizeof(int) * (N + 1));
  std::memcpy(kernel_dims.data, kernel_shape, sizeof(int) * N);
  std::memcpy(strides.data, stride, sizeof(int) * N);
  std::memcpy(dilations.data, dilation, sizeof(int) * N);
  std::memcpy(pads.data, pad, sizeof(int) * N);
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  // The kernel only adds contributions, so start from a zeroed image.
  Set<T, CUDAContext>(img_size, 0, img_data, context);
  Im2ColNdNCHWCUDAKernel<T, N, true>
      <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          outer_size,
          inner_size,
          kernel_size,
          img_dims,
          col_dims,
          kernel_dims,
          strides,
          dilations,
          pads,
          col_data,
          img_data);
}
} // namespace
// Im2Col specialization for float / CUDA / NCHW. One kernel thread per
// column-buffer element of one channel plane (channels * output_h *
// output_w). `groups` has no effect on the NCHW GPU path and is ignored.
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int /* groups */) {
  // Effective kernel extent after dilation; standard conv output size.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = channels * output_h * output_w;
  Im2ColNCHWCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          height,
          width,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_h,
          output_w,
          img_data,
          col_data);
}
// Im2Col specialization for float / CUDA / NHWC. Only groups == 1 is
// supported for this layout on GPU (enforced below).
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = output_h * output_w * channels;
  Im2ColNHWCCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          height,
          width,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_w,
          channels,
          img_data,
          col_data);
}
// Col2Im specialization for float / CUDA / NCHW. One kernel thread per image
// element (channels * height * width); `groups` is ignored for NCHW.
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int /* groups */) {
  // Effective kernel extent after dilation; standard conv output size.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = channels * height * width;
  Col2ImNCHWCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          height,
          width,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_h,
          output_w,
          col_data,
          img_data);
}
// Col2Im specialization for float / CUDA / NHWC. Only groups == 1 is
// supported for this layout on GPU (enforced below). Argument order matches
// Col2ImNHWCCUDAKernel: (n, input_w, channels, patch_h, patch_w, ...).
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = height * width * channels;
  Col2ImNHWCCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          width,
          channels,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_h,
          output_w,
          col_data,
          img_data);
}
// Rank-generic Im2Col entry point (NCHW): dispatches the runtime rank N to a
// compile-time specialization of Im2ColNdNCHWCUDAImpl.
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context) {
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      N,
      Im2ColNdNCHWCUDAImpl,
      float,
      img_size,
      col_size,
      img_shape,
      col_shape,
      kernel_shape,
      stride,
      dilation,
      pad,
      img_data,
      col_data,
      context);
}
// Rank-generic Col2Im entry point (NCHW): dispatches the runtime rank N to a
// compile-time specialization of Col2ImNdNCHWCUDAImpl.
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context) {
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      N,
      Col2ImNdNCHWCUDAImpl,
      float,
      img_size,
      col_size,
      img_shape,
      col_shape,
      kernel_shape,
      stride,
      dilation,
      pad,
      col_data,
      img_data,
      context);
}
// Copies an M x N matrix of `itemsize`-byte elements device-to-device,
// honoring the source (lda) and destination (ldb) leading dimensions, on the
// context's stream. Typed element-wise copy is not supported on CUDA.
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
    const size_t itemsize,
    const int M,
    const int N,
    const void* A,
    const int lda,
    void* B,
    const int ldb,
    CUDAContext* context,
    TypeMeta::TypedCopy copy) {
  CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
  // Match the typed CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX versions: a
  // degenerate matrix needs no work, and skipping it avoids handing zero
  // extents to cudaMemcpy2DAsync.
  if (M == 0 || N == 0) {
    return;
  }
  cudaMemcpy2DAsync(
      B,
      ldb * itemsize,
      A,
      lda * itemsize,
      N * itemsize,
      M,
      cudaMemcpyDeviceToDevice,
      context->cuda_stream());
}
// Typed CopyMatrix specializations: strided 2-D device-to-device copy of an
// M x N matrix of T with source/destination leading dimensions lda/ldb.
// Empty matrices are a no-op; the copy is asynchronous on the context stream.
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T)  \
  template <>                                   \
  void CopyMatrix<T, CUDAContext>(              \
      const int M,                              \
      const int N,                              \
      const T* A,                               \
      const int lda,                            \
      T* B,                                     \
      const int ldb,                            \
      CUDAContext* context) {                   \
    if (M == 0 || N == 0) {                     \
      return;                                   \
    }                                           \
    cudaMemcpy2DAsync(                          \
        B,                                      \
        sizeof(T) * ldb,                        \
        A,                                      \
        sizeof(T) * lda,                        \
        sizeof(T) * N,                          \
        M,                                      \
        cudaMemcpyDeviceToDevice,               \
        context->cuda_stream());                \
  }
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
// Asynchronously copies N floats device-to-device on the context's stream.
// No-op when source and destination alias or when N is not positive.
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
    const int N,
    const float* src,
    float* dst,
    CUDAContext* context) {
  if (src != dst && N > 0) {
    cudaMemcpyAsync(
        dst,
        src,
        sizeof(float) * N,
        cudaMemcpyDeviceToDevice,
        context->cuda_stream());
  }
}
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
// Reduces each row of a row-major (rows x cols) matrix to one scalar, scaled
// by alpha. One thread block per row (blocks grid-stride over rows); the
// block-wide combine uses cub::BlockReduce.
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
    const int rows,
    const int cols,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < rows; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < cols; j += blockDim.x) {
      val = reducer(X[i * cols + j], val);
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // temp_storage is reused on the next row; all threads must finish first.
    __syncthreads();
  }
}
// Reduces each column of a row-major (rows x cols) matrix to one scalar,
// scaled by alpha. One thread block per column (blocks grid-stride over
// columns); the block-wide combine uses cub::BlockReduce.
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
    const int rows,
    const int cols,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < cols; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < rows; j += blockDim.x) {
      val = reducer(X[j * cols + i], val);
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // temp_storage is reused on the next column; synchronize before moving on.
    __syncthreads();
  }
}
} // namespace
// RowwiseMax: per-row maximum of an N x D matrix via RowwiseReduceKernel,
// one block per row (capped at CAFFE_MAXIMUM_NUM_BLOCKS).
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T)                            \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>(                     \
      const int N, const int D, const T* x, T* y, CUDAContext* context) { \
    RowwiseReduceKernel<<<                                                \
        std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),                            \
        CAFFE_CUDA_NUM_THREADS,                                           \
        0,                                                                \
        context->cuda_stream()>>>(                                        \
        N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y);  \
  }
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
// ColwiseMax: per-column maximum of an N x D matrix via ColwiseReduceKernel,
// one block per column (capped at CAFFE_MAXIMUM_NUM_BLOCKS).
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T)                            \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>(                     \
      const int N, const int D, const T* x, T* y, CUDAContext* context) { \
    ColwiseReduceKernel<<<                                                \
        std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS),                            \
        CAFFE_CUDA_NUM_THREADS,                                           \
        0,                                                                \
        context->cuda_stream()>>>(                                        \
        N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y);  \
  }
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
// Element-wise clamp-from-below: y[i] = max(x[i], alpha). Written as an
// explicit grid-stride loop so any launch configuration covers all N
// elements.
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
  const int step = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step) {
    y[idx] = fmaxf(x[idx], alpha);
  }
}
} // namespace
// Launches maximum_kernel to compute y[i] = max(x[i], alpha) for N floats on
// the context's stream.
template <>
CAFFE2_CUDA_EXPORT void Maximum(
    const int N,
    const float alpha,
    const float* x,
    float* y,
    CUDAContext* context) {
  // Use the standard elementwise launch config (ceil(N / threads) blocks,
  // capped) like every other 1-D kernel in this file. The previous
  // std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS) launched up to N *blocks*, i.e.
  // CAFFE_CUDA_NUM_THREADS threads per element; the grid-stride loop in the
  // kernel keeps either configuration correct.
  maximum_kernel<<<
      CAFFE_GET_BLOCKS(N),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context->cuda_stream()>>>(N, alpha, x, y);
}
namespace {
// Generic strided tensor reduction: each block reduces one output element
// (outer index i) over inner_size inputs. X_strides / Y_dims describe a
// transposed view with all reduced axes innermost; the DivMod loop converts
// the linear (i, j) position back into an offset into X.
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
    const int outer_size,
    const int inner_size,
    SimpleArray<int, D> X_strides,
    SimpleArray<FixedDivisor<int>, D> Y_dims,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      int X_index = 0;
      int Y_index = i * inner_size + j;
#pragma unroll
      for (int d = D - 1; d >= 0; --d) {
        int r;
        Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
        X_index += r * X_strides.data[d];
      }
#if __CUDA_ARCH__ >= 350
      val = reducer(val, __ldg(X + X_index));
#else
      val = reducer(val, X[X_index]);
#endif
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // temp_storage is reused next iteration; synchronize the whole block.
    __syncthreads();
  }
}
// Rank-D launcher for ReduceTensorCUDAKernel: builds the transposed stride
// array for X and fixed divisors for the (permuted) dims, then launches one
// block per output element (capped at CAFFE_MAXIMUM_NUM_BLOCKS).
template <typename T, class Reducer, int D>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl(
    const int outer_size,
    const int inner_size,
    const int* dims,
    const int* axes,
    const Reducer& reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides;
  SimpleArray<FixedDivisor<int>, D> Y_dims;
  utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
  for (int i = 0; i < D; ++i) {
    Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
  }
  ReduceTensorCUDAKernel<T, Reducer, D>
      <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          outer_size,
          inner_size,
          X_strides,
          Y_dims,
          reducer,
          init,
          alpha,
          X,
          Y);
}
// Reduces X over the axes listed in `axes` (keeping dims), scaling the
// result by alpha. Fast paths: empty input, alpha == 0, identity (nothing
// reduced), rowwise, and colwise; the general case permutes the reduced axes
// innermost and dispatches on rank.
template <typename T, class Reducer>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDA(
    const int num_dims,
    const int* dims,
    const int num_axes,
    const int* axes,
    const Reducer& reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y,
    CUDAContext* context) {
  CAFFE_ENFORCE_LE(num_axes, num_dims);
  // Output shape: reduced axes collapse to size 1.
  std::vector<int> Y_dims_vector(dims, dims + num_dims);
  for (int i = 0; i < num_axes; ++i) {
    Y_dims_vector[axes[i]] = 1;
  }
  const int* X_dims = dims;
  const int* Y_dims = Y_dims_vector.data();
  const int X_size =
      std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
  const int Y_size =
      std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
  if (X_size == 0) {
    // Empty input: every output element is the (scaled) identity.
    Set<T, CUDAContext>(Y_size, alpha * init, Y, context);
    return;
  }
  if (alpha == T(0)) {
    // Scaling by zero makes the reduction result irrelevant.
    Set<T, CUDAContext>(Y_size, T(0), Y, context);
    return;
  }
  if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
    // Nothing is reduced: the result is just a scaled copy of X.
    Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
    return;
  }
  int rows;
  int cols;
  if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    RowwiseReduceKernel<T>
        <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y);
    return;
  }
  if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    ColwiseReduceKernel<T>
        <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y);
    return;
  }
  // General case: build the permutation that moves reduced axes innermost,
  // then dispatch on the (runtime) tensor rank.
  std::vector<int> transpose_axes(num_dims);
  utils::ComputeTransposeAxesForReduceOp(
      num_dims, num_axes, axes, transpose_axes.data());
  const int outer_size = Y_size;
  const int inner_size = X_size / Y_size;
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
      num_dims,
      ReduceTensorCUDAImpl,
      T,
      Reducer,
      outer_size,
      inner_size,
      dims,
      transpose_axes.data(),
      reducer,
      init,
      alpha,
      X,
      Y,
      context);
}
} // namespace
// ReduceMin: reduce with cub::Min(); identity is the type's maximum value.
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T)     \
  template <>                                     \
  CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \
      const int num_dims,                         \
      const int* dims,                            \
      const int num_axes,                         \
      const int* axes,                            \
      const T alpha,                              \
      const T* X,                                 \
      T* Y,                                       \
      CUDAContext* context) {                     \
    ReduceTensorCUDA(                             \
        num_dims,                                 \
        dims,                                     \
        num_axes,                                 \
        axes,                                     \
        cub::Min(),                               \
        std::numeric_limits<T>::max(),            \
        alpha,                                    \
        X,                                        \
        Y,                                        \
        context);                                 \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
// ReduceMax: reduce with cub::Max(); identity is the type's lowest value.
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T)     \
  template <>                                     \
  CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \
      const int num_dims,                         \
      const int* dims,                            \
      const int num_axes,                         \
      const int* axes,                            \
      const T alpha,                              \
      const T* X,                                 \
      T* Y,                                       \
      CUDAContext* context) {                     \
    ReduceTensorCUDA(                             \
        num_dims,                                 \
        dims,                                     \
        num_axes,                                 \
        axes,                                     \
        cub::Max(),                               \
        std::numeric_limits<T>::lowest(),         \
        alpha,                                    \
        X,                                        \
        Y,                                        \
        context);                                 \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
// ReduceSum: reduce with cub::Sum(); identity is zero.
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T)     \
  template <>                                     \
  CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \
      const int num_dims,                         \
      const int* dims,                            \
      const int num_axes,                         \
      const int* axes,                            \
      const T alpha,                              \
      const T* X,                                 \
      T* Y,                                       \
      CUDAContext* context) {                     \
    ReduceTensorCUDA(                             \
        num_dims,                                 \
        dims,                                     \
        num_axes,                                 \
        axes,                                     \
        cub::Sum(),                               \
        T(0),                                     \
        alpha,                                    \
        X,                                        \
        Y,                                        \
        context);                                 \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
// ReduceMean: a sum whose scale factor is divided by the number of reduced
// elements (the product of the reduced dims).
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T)    \
  template <>                                     \
  CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
      const int num_dims,                         \
      const int* dims,                            \
      const int num_axes,                         \
      const int* axes,                            \
      const T alpha,                              \
      const T* X,                                 \
      T* Y,                                       \
      CUDAContext* context) {                     \
    int scale = 1;                                \
    for (int i = 0; i < num_axes; ++i) {          \
      scale *= dims[axes[i]];                     \
    }                                             \
    ReduceTensorCUDA(                             \
        num_dims,                                 \
        dims,                                     \
        num_axes,                                 \
        axes,                                     \
        cub::Sum(),                               \
        T(0),                                     \
        alpha / static_cast<T>(scale),            \
        X,                                        \
        Y,                                        \
        context);                                 \
  }
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
// Broadcast kernel: for every output element, reconstruct the matching input
// offset using X_strides (stride 0 for broadcast dimensions) and the fixed
// divisors of the output dims, then write the scaled value.
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
    const int Y_size,
    const SimpleArray<int, D> X_strides,
    const SimpleArray<FixedDivisor<int>, D> Y_dims,
    const T alpha,
    const T* X,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
    int X_index = 0;
    int Y_index_val = Y_index;
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
      X_index += d * X_strides.data[i];
    }
#if __CUDA_ARCH__ >= 350
    Y[Y_index] = __ldg(X + X_index) * alpha;
#else
    Y[Y_index] = X[X_index] * alpha;
#endif
  }
}
// Rank-D broadcast launcher. X's dims are right-aligned against Y's D dims;
// missing leading dims and size-1 dims get stride 0 so they broadcast.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
    const int X_ndim,
    const int* X_dims,
    const int* Y_dims,
    const T alpha,
    const T* X,
    T* Y,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides_array;
  SimpleArray<FixedDivisor<int>, D> Y_dims_array;
  // d leading output dims have no counterpart in X: stride 0.
  const int d = D - X_ndim;
  std::fill(X_strides_array.data, X_strides_array.data + d, 0);
  int cur_stride = 1;
  for (int i = D - 1; i >= d; --i) {
    // Each X dim must either match the Y dim or be 1 (broadcastable).
    CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
    X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
    cur_stride *= X_dims[i - d];
  }
  for (int i = 0; i < D; ++i) {
    if (Y_dims[i] == 0) {
      // Empty output: nothing to do (and FixedDivisor cannot take 0).
      return;
    }
    Y_dims_array.data[i] = FixedDivisor<int>(Y_dims[i]);
  }
  const int Y_size =
      std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
  BroadcastCUDAKernel<T, D>
      <<<CAFFE_GET_BLOCKS(Y_size),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
} // namespace
// Broadcast entry point: Y = alpha * broadcast(X). Dispatches on the output
// rank to BroadcastCUDAImpl; requires X's rank to not exceed Y's.
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T)  \
  template <>                                 \
  CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
      const int X_ndim,                       \
      const int* X_dims,                      \
      const int Y_ndim,                       \
      const int* Y_dims,                      \
      const T alpha,                          \
      const T* X,                             \
      T* Y,                                   \
      CUDAContext* context) {                 \
    CAFFE_ENFORCE_LE(X_ndim, Y_ndim);         \
    DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(   \
        Y_ndim,                               \
        BroadcastCUDAImpl,                    \
        T,                                    \
        X_ndim,                               \
        X_dims,                               \
        Y_dims,                               \
        alpha,                                \
        X,                                    \
        Y,                                    \
        context);                             \
  }
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
// Computes per-row mean and (biased) variance of a row-major (rows x cols)
// matrix, one block per row. Uses the E[x^2] - E[x]^2 formulation with two
// block-wide sums.
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
    const int rows,
    const int cols,
    const T* X,
    T* mean,
    T* variance) {
  __shared__ typename BlockReduce<T>::TempStorage m_storage;
  __shared__ typename BlockReduce<T>::TempStorage v_storage;
  const T scale = T(1) / static_cast<T>(cols);
  for (int i = blockIdx.x; i < rows; i += gridDim.x) {
    T m_val = 0;
    T v_val = 0;
    for (int j = threadIdx.x; j < cols; j += blockDim.x) {
      const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
      m_val += __ldg(X + X_index);
      v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
      m_val += X[X_index];
      v_val += X[X_index] * X[X_index];
#endif
    }
    m_val = BlockReduce<T>(m_storage).Sum(m_val);
    v_val = BlockReduce<T>(v_storage).Sum(v_val);
    if (threadIdx.x == 0) {
      const T mu = m_val * scale;
      mean[i] = mu;
      variance[i] = v_val * scale - mu * mu;
    }
    // Shared reduce storage is reused for the next row.
    __syncthreads();
  }
}
// Computes per-column mean and (biased) variance of a row-major
// (rows x cols) matrix, one block per column, via E[x^2] - E[x]^2.
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
    const int rows,
    const int cols,
    const T* X,
    T* mean,
    T* variance) {
  __shared__ typename BlockReduce<T>::TempStorage m_storage;
  __shared__ typename BlockReduce<T>::TempStorage v_storage;
  const T scale = T(1) / static_cast<T>(rows);
  for (int i = blockIdx.x; i < cols; i += gridDim.x) {
    T m_val = 0;
    T v_val = 0;
    for (int j = threadIdx.x; j < rows; j += blockDim.x) {
      const int X_index = j * cols + i;
#if __CUDA_ARCH__ >= 350
      m_val += __ldg(X + X_index);
      v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
      m_val += X[X_index];
      v_val += X[X_index] * X[X_index];
#endif
    }
    m_val = BlockReduce<T>(m_storage).Sum(m_val);
    v_val = BlockReduce<T>(v_storage).Sum(v_val);
    if (threadIdx.x == 0) {
      const T mu = m_val * scale;
      mean[i] = mu;
      variance[i] = v_val * scale - mu * mu;
    }
    // Shared reduce storage is reused for the next column.
    __syncthreads();
  }
}
// General strided moments kernel: one block per output element; the DivMod
// loop maps the linear (i, j) position of the transposed view back to an X
// offset, like ReduceTensorCUDAKernel. Variance is E[x^2] - E[x]^2.
template <typename T, int D>
__global__ void MomentsCUDAKernel(
    const int outer_size,
    const int inner_size,
    SimpleArray<int, D> X_strides,
    SimpleArray<FixedDivisor<int>, D> Y_dims,
    const T* X,
    T* mean,
    T* variance) {
  __shared__ typename BlockReduce<T>::TempStorage m_storage;
  __shared__ typename BlockReduce<T>::TempStorage v_storage;
  const T scale = T(1) / static_cast<T>(inner_size);
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    T m_val = 0;
    T v_val = 0;
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      int X_index = 0;
      int Y_index = i * inner_size + j;
#pragma unroll
      for (int d = D - 1; d >= 0; --d) {
        int r;
        Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
        X_index += r * X_strides.data[d];
      }
#if __CUDA_ARCH__ >= 350
      m_val += __ldg(X + X_index);
      v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
      m_val += X[X_index];
      v_val += X[X_index] * X[X_index];
#endif
    }
    m_val = BlockReduce<T>(m_storage).Sum(m_val);
    v_val = BlockReduce<T>(v_storage).Sum(v_val);
    if (threadIdx.x == 0) {
      const T mu = m_val * scale;
      mean[i] = mu;
      variance[i] = v_val * scale - mu * mu;
    }
    // Shared reduce storage is reused on the next iteration.
    __syncthreads();
  }
}
// Rank-D launcher for MomentsCUDAKernel: builds transposed X strides and
// fixed divisors for the permuted dims, one block per output element.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void MomentsCUDAImpl(
    const int outer_size,
    const int inner_size,
    const int* dims,
    const int* axes,
    const T* X,
    T* mean,
    T* variance,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides;
  SimpleArray<FixedDivisor<int>, D> Y_dims;
  utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
  for (int i = 0; i < D; ++i) {
    Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
  }
  MomentsCUDAKernel<T, D>
      <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
// Computes mean and variance of X over the axes listed in `axes` (keeping
// dims). Fast paths: empty input, identity (nothing reduced), rowwise, and
// colwise; otherwise the reduced axes are permuted innermost and the rank is
// dispatched at runtime.
template <typename T>
CAFFE2_CUDA_EXPORT void MomentsCUDA(
    const int num_dims,
    const int* dims,
    const int num_axes,
    const int* axes,
    const T* X,
    T* mean,
    T* variance,
    CUDAContext* context) {
  CAFFE_ENFORCE_LE(num_axes, num_dims);
  // Output shape: reduced axes collapse to size 1.
  std::vector<int> Y_dims_vector(dims, dims + num_dims);
  for (int i = 0; i < num_axes; ++i) {
    Y_dims_vector[axes[i]] = 1;
  }
  const int* X_dims = dims;
  const int* Y_dims = Y_dims_vector.data();
  const int X_size =
      std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
  const int Y_size =
      std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
  if (X_size == 0) {
    Set<T, CUDAContext>(Y_size, T(0), mean, context);
    Set<T, CUDAContext>(Y_size, T(0), variance, context);
    return;
  }
  if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
    // Nothing is reduced: mean is a copy of X and variance is zero.
    cudaMemcpyAsync(
        mean,
        X,
        sizeof(T) * X_size,
        cudaMemcpyDeviceToDevice,
        context->cuda_stream());
    Set<T, CUDAContext>(Y_size, T(0), variance, context);
    return;
  }
  int rows;
  int cols;
  if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    RowwiseMomentsCUDAKernel<T>
        <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context->cuda_stream()>>>(rows, cols, X, mean, variance);
    return;
  }
  if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
    ColwiseMomentsCUDAKernel<T>
        <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context->cuda_stream()>>>(rows, cols, X, mean, variance);
    return;
  }
  // General case: kept axes first, reduced axes last; split sizes at the
  // pivot into outer (kept) and inner (reduced) products.
  std::vector<int> transpose_axes(num_dims);
  utils::ComputeTransposeAxesForReduceOp(
      num_dims, num_axes, axes, transpose_axes.data());
  const int pivot = num_dims - num_axes;
  int outer_size = 1;
  for (int i = 0; i < pivot; ++i) {
    outer_size *= dims[transpose_axes[i]];
  }
  int inner_size = 1;
  for (int i = pivot; i < num_dims; ++i) {
    inner_size *= dims[transpose_axes[i]];
  }
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      num_dims,
      MomentsCUDAImpl,
      T,
      outer_size,
      inner_size,
      dims,
      transpose_axes.data(),
      X,
      mean,
      variance,
      context);
}
} // namespace
// Public Moments entry point: thin forwarder to MomentsCUDA<T>.
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T)                        \
  template <>                                                     \
  CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>(                \
      const int num_dims,                                         \
      const int* dims,                                            \
      const int num_axes,                                         \
      const int* axes,                                            \
      const T* X,                                                 \
      T* mean,                                                    \
      T* variance,                                                \
      CUDAContext* context) {                                     \
    MomentsCUDA<T>(                                               \
        num_dims, dims, num_axes, axes, X, mean, variance, context); \
  }
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
// Element-wise inverse standard deviation: inv_std[i] = rsqrt(var[i] + eps).
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
// Binds the per-type reciprocal-sqrt function to the kernel specialization.
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func)     \
  template <>                                         \
  __global__ void InvStdCUDAKernel<T>(                \
      const int N, const T epsilon, const T* var, T* inv_std) { \
    CUDA_1D_KERNEL_LOOP(i, N) {                       \
      inv_std[i] = Func(var[i] + epsilon);            \
    }                                                 \
  }
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
// Host entry point: launches InvStdCUDAKernel over N elements.
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T)            \
  template <>                                         \
  CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>(     \
      const int N,                                    \
      const T epsilon,                                \
      const T* var,                                   \
      T* inv_std,                                     \
      CUDAContext* context) {                         \
    InvStdCUDAKernel<T>                               \
        <<<CAFFE_GET_BLOCKS(N),                       \
           CAFFE_CUDA_NUM_THREADS,                    \
           0,                                         \
           context->cuda_stream()>>>(N, epsilon, var, inv_std); \
  }
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
namespace {
// Transpose kernel: for each output element, reconstruct the source offset
// from the transposed strides and the fixed divisors of the output dims.
template <typename T, int D>
__global__ void TransposeCUDAKernel(
    const int size,
    const SimpleArray<int, D> X_strides,
    const SimpleArray<FixedDivisor<int>, D> Y_dims,
    const T* X,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(Y_index, size) {
    int X_index = 0;
    int Y_index_val = Y_index;
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
      X_index += d * X_strides.data[i];
    }
#if __CUDA_ARCH__ >= 350
    Y[Y_index] = __ldg(X + X_index);
#else
    Y[Y_index] = X[X_index];
#endif
  }
}
// Rank-D transpose launcher: computes the permuted strides/dims and launches
// one thread per output element.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void TransposeCUDAImpl(
    const int* dims,
    const int* axes,
    const T* X,
    T* Y,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides;
  SimpleArray<FixedDivisor<int>, D> Y_dims;
  utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
  int size = 1;
  for (int i = 0; i < D; ++i) {
    Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
    size *= dims[i];
  }
  TransposeCUDAKernel<T, D>
      <<<CAFFE_GET_BLOCKS(size),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(size, X_strides, Y_dims, X, Y);
}
} // namespace
// Public Transpose entry point: identity permutations degrade to a plain
// device copy; otherwise dispatch on the runtime rank.
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T)                         \
  template <>                                                        \
  CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>(                 \
      const int ndim,                                                \
      const int* dims,                                               \
      const int* axes,                                               \
      const T* X,                                                    \
      T* Y,                                                          \
      CUDAContext* context) {                                        \
    if (utils::IsIdentityPermutation(ndim, axes)) {                  \
      const int size =                                               \
          std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \
      context->template Copy<T, CUDAContext, CUDAContext>(size, X, Y);   \
      return;                                                        \
    }                                                                \
    DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(                          \
        ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context);      \
  }
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
namespace {
// Per-channel affine transform: Y[i] = scale[c] * X[i] + bias[c], where the
// channel c of element i is derived from the storage order (NCHW: i/HxW % C,
// NHWC: i % C).
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelCUDAKernel(
    const int size,
    const int C,
    const int HxW,
    const T* X,
    const T* scale,
    const T* bias,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(i, size) {
    const int c = kOrder == StorageOrder::NCHW ? i / HxW % C : i % C;
#if __CUDA_ARCH__ >= 350
    Y[i] = __ldg(scale + c) * __ldg(X + i) + __ldg(bias + c);
#else
    Y[i] = scale[c] * X[i] + bias[c];
#endif
  }
}
} // namespace
// Public AffineChannel entry point: launches one thread per element of the
// (N, C, HxW) tensor in the given storage order.
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T, kOrder)        \
  template <>                                                    \
  CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, kOrder>( \
      const int N,                                               \
      const int C,                                               \
      const int HxW,                                             \
      const T* X,                                                \
      const T* scale,                                            \
      const T* bias,                                             \
      T* Y,                                                      \
      CUDAContext* context) {                                    \
    const int size = N * C * HxW;                                \
    AffineChannelCUDAKernel<T, kOrder>                           \
        <<<CAFFE_GET_BLOCKS(size),                               \
           CAFFE_CUDA_NUM_THREADS,                               \
           0,                                                    \
           context->cuda_stream()>>>(size, C, HxW, X, scale, bias, Y); \
  }
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NCHW)
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NHWC)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
} // namespace math
} // namespace caffe2
|
7305ec8567a28105203852721c18c61c99214c2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by ztrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "ztrtri.cuh"
#include "ztrtri_upper_device.cuh"
/******************************************************************************/
// Kernel wrapper: invert the diagonal blocks of an upper-triangular matrix.
__global__ void
ztrtri_diag_upper_kernel(
    magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
    ztrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
// Kernel wrapper: part 1 of the triple-zgemm expansion step for jb = 16.
__global__ void
triple_zgemm16_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
// Kernel wrapper: part 2 of the triple-zgemm expansion step for jb = 16.
__global__ void
triple_zgemm16_part2_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
// Kernel wrapper: part 1 of the triple-zgemm expansion step for jb = 32.
__global__ void
triple_zgemm32_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
// Kernel wrapper: part 2 of the triple-zgemm expansion step for jb = 32.
__global__ void
triple_zgemm32_part2_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
// Kernel wrapper: part 1 of the triple-zgemm expansion step for jb = 64.
__global__ void
triple_zgemm64_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
// Kernel wrapper: part 2 of the triple-zgemm expansion step for jb = 64.
__global__ void
triple_zgemm64_part2_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
// Kernel wrapper: part 1 of the triple-zgemm expansion step for jb > 64.
__global__ void
triple_zgemm_above64_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part3_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
| 7305ec8567a28105203852721c18c61c99214c2e.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by ztrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "ztrtri.cuh"
#include "ztrtri_upper_device.cuh"
/******************************************************************************/
/* Thin __global__ wrappers around the upper-triangular TRTRI device
   routines from ztrtri_upper_device.cuh (CUDA flavor of the hipified
   copy above in this dump).  ztrtri_diag_upper_kernel handles the
   diagonal blocks; the triple_zgemmNN_partM kernels forward to the GEMM
   stages for diagonal-block sizes 16/32/64/>64. */
__global__ void
ztrtri_diag_upper_kernel(
    magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
    ztrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
__global__ void
triple_zgemm16_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm16_part2_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm32_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm32_part2_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm64_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm64_part2_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part1_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part2_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part3_upper_kernel(
    int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
    triple_zgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
|
a9a1ed94e6618d1f25b029fab13864bdc9f51d4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
using namespace std;
// Each thread prints its index; output appears when the host next
// synchronizes with the device (device printf is buffered).
__global__ void printThread(void) {
    printf("Hello World! I am thread %d\n", threadIdx.x);
}
int main() {
    // Launch 1 block of 4 threads via the hipify-generated launch macro.
    // NOTE(review): no launch-error check is performed.
    hipLaunchKernelGGL(( printThread), dim3(1), dim3(4), 0, 0, );
    // Block until the kernel finishes so its printf output is flushed
    // before the process exits; the returned status is ignored.
    hipDeviceSynchronize();
    return 0;
}
| a9a1ed94e6618d1f25b029fab13864bdc9f51d4d.cu | #include <cstdio>
using namespace std;
// Each thread prints its index; output appears when the host next
// synchronizes with the device (device printf is buffered).
__global__ void printThread(void) {
    printf("Hello World! I am thread %d\n", threadIdx.x);
}
// Host driver: launches the hello-world kernel and waits for its output.
// Improvement over the original: kernel launches are asynchronous and
// return no status, so launch-configuration errors and asynchronous
// execution errors are now surfaced explicitly instead of being dropped.
int main() {
    // One block of four threads; each prints its thread index.
    printThread<<<1, 4>>>();
    // Catch launch-configuration errors (bad grid/block dims, etc.).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Block until the kernel finishes so device-side printf output is
    // flushed; this also surfaces asynchronous execution errors.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
4f3c785257f20f1e8142126d55bbb91b2dfc7722.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
//#include "REPEATL.h"
#include "../include/REPEATR.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 28
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define ITERATIONS REPLACE_ITERATIONS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
// Wraps a runtime-API call; aborts with file/line context on failure.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Prints the failing status with its source location and exits(-1).
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
    if(hipSuccess != err){
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
        exit(-1);
    }
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Checks the sticky last-error state (e.g. after a kernel launch) and
// exits with the given message plus source location if one is pending.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
    hipError_t err = hipGetLastError();
    if (hipSuccess != err){
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
        exit(-1);
    }
}
// end of CUDA Helper Functions
// Device code
// Power-measurement kernel: repeatedly executes the memory-access
// sequence generated by the REPEAT_L6 macro (from ../include/REPEATR.h)
// ITERATIONS times.  C[0] keeps `sum` live so the loop is not optimized
// away.  NOTE(review): the actual accesses (and whether A/tid/size/j/k
// are used at all) are hidden inside REPEAT_L6 — confirm in REPEATR.h.
__global__ void PowerKernal(int* A, int* C, int N){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    //Do Some Computation
    // Working-set size in ints; only referenced by the commented-out
    // cache-hit variant below (and possibly by REPEAT_L6).
    int size = (400*max_tid*LINE_SIZE)/sizeof(int);
    unsigned j=0, k=0;
    int sum=0;
    // Fill the L1 cache, Miss on every iteration
    for (int i=0; i<ITERATIONS ; i++){
        REPEAT_L6(0);
        //REPLACE_ITERATIONS
    }
    /*
    // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
    for(k=0; k<ITERATIONS; ++k){
        for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){
            C[tid+j] = A[tid+j];
        }
    }
    */
    C[0]=sum;
    __syncthreads();
}
// Host code
// Host driver: allocates ~N ints on host and device, uploads the input,
// starts the DAQ power-measurement session, times a single PowerKernal
// launch, then downloads the result and releases all buffers.
int main(){
    printf("Power Microbenchmarks\n");
    int N = (400*max_tid*LINE_SIZE);
    size_t size = N * sizeof(int) ;
    // Allocate input vectors h_A and h_B in host memory
    h_A = (int*)malloc(size);
    // NOTE(review): on allocation failure this cleans up but does not
    // exit, so execution continues with a null pointer.
    if (h_A == 0) CleanupResources();
    //h_B = (float*)malloc(size);
    //if (h_B == 0) CleanupResources();
    h_C = (int*)malloc(size);
    if (h_C == 0) CleanupResources();
    // Initialize input vectors
    RandomInit(h_A, N);
    //RandomInit(h_B, N);
    // Allocate vectors in device memory
    checkCudaErrors( hipMalloc((void**)&d_A, size) );
    //checkCudaErrors( hipMalloc((void**)&d_B, size) );
    checkCudaErrors( hipMalloc((void**)&d_C, size) );
    // Copy vectors from host memory to device memory
    checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
    //checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
    //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    dim3 dimGrid(NUM_OF_BLOCKS,1);
    dim3 dimBlock(THREADS_PER_BLOCK,1);
    CUT_SAFE_CALL(cutCreateTimer(&my_timer));
    // Begin external power data acquisition before the timed launch.
    TaskHandle taskhandle = LaunchDAQ();
    CUT_SAFE_CALL(cutStartTimer(my_timer));
    hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
    // Synchronize so the timer measures the full kernel execution.
    CUDA_SAFE_CALL( hipDeviceSynchronize() );
    printf("execution time = %f\n", cutGetTimerValue(my_timer));
    TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
    CUT_SAFE_CALL(cutStopTimer(my_timer));
    CUT_SAFE_CALL(cutDeleteTimer(my_timer));
    getLastCudaError("kernel launch failure");
    #ifdef _DEBUG
    checkCudaErrors( hipDeviceSynchronize() );
    #endif
    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
    CleanupResources();
    return 0;
}
// Releases every buffer the benchmark may have allocated.  Safe to call
// at any stage: pointers that were never allocated are still null and
// are skipped.  (The *_B buffers are never allocated in this build.)
void CleanupResources(void){
    // Device-side buffers.
    if (d_A) {
        hipFree(d_A);
    }
    if (d_C) {
        hipFree(d_C);
    }
    // Host-side buffers.
    if (h_A) {
        free(h_A);
    }
    if (h_C) {
        free(h_C);
    }
}
// Fills `data` with pseudo-random entries in [0, 255].
// Fix: the original wrote (int)(rand() / RAND_MAX) — *integer* division,
// which is 0 for every draw except rand() == RAND_MAX, so the benchmark
// touched all-zero data despite the "random entries" intent.  Masking
// the low byte yields genuinely varied values (same `& 255` pattern the
// companion file uses for its fill data).
void RandomInit(int* data, int n){
    for (int i = 0; i < n; ++i)
        data[i] = rand() & 255;
}
| 4f3c785257f20f1e8142126d55bbb91b2dfc7722.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
//#include "REPEATL.h"
#include "../include/REPEATR.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 28
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define ITERATIONS REPLACE_ITERATIONS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
// Wraps a runtime-API call; aborts with file/line context on failure.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Prints the failing status with its source location and exits(-1).
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
    if(cudaSuccess != err){
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Checks the sticky last-error state (e.g. after a kernel launch) and
// exits with the given message plus source location if one is pending.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err){
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}
// end of CUDA Helper Functions
// Device code
// Power-measurement kernel: repeatedly executes the memory-access
// sequence generated by the REPEAT_L6 macro (from ../include/REPEATR.h)
// ITERATIONS times.  C[0] keeps `sum` live so the loop is not optimized
// away.  NOTE(review): the actual accesses (and whether A/tid/size/j/k
// are used at all) are hidden inside REPEAT_L6 — confirm in REPEATR.h.
__global__ void PowerKernal(int* A, int* C, int N){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    //Do Some Computation
    // Working-set size in ints; only referenced by the commented-out
    // cache-hit variant below (and possibly by REPEAT_L6).
    int size = (400*max_tid*LINE_SIZE)/sizeof(int);
    unsigned j=0, k=0;
    int sum=0;
    // Fill the L1 cache, Miss on every iteration
    for (int i=0; i<ITERATIONS ; i++){
        REPEAT_L6(0);
        //REPLACE_ITERATIONS
    }
    /*
    // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
    for(k=0; k<ITERATIONS; ++k){
        for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){
            C[tid+j] = A[tid+j];
        }
    }
    */
    C[0]=sum;
    __syncthreads();
}
// Host code
// Host driver: allocates ~N ints on host and device, uploads the input,
// starts the DAQ power-measurement session, times a single PowerKernal
// launch, then downloads the result and releases all buffers.
int main(){
    printf("Power Microbenchmarks\n");
    int N = (400*max_tid*LINE_SIZE);
    size_t size = N * sizeof(int) ;
    // Allocate input vectors h_A and h_B in host memory
    h_A = (int*)malloc(size);
    // NOTE(review): on allocation failure this cleans up but does not
    // exit, so execution continues with a null pointer.
    if (h_A == 0) CleanupResources();
    //h_B = (float*)malloc(size);
    //if (h_B == 0) CleanupResources();
    h_C = (int*)malloc(size);
    if (h_C == 0) CleanupResources();
    // Initialize input vectors
    RandomInit(h_A, N);
    //RandomInit(h_B, N);
    // Allocate vectors in device memory
    checkCudaErrors( cudaMalloc((void**)&d_A, size) );
    //checkCudaErrors( cudaMalloc((void**)&d_B, size) );
    checkCudaErrors( cudaMalloc((void**)&d_C, size) );
    // Copy vectors from host memory to device memory
    checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
    //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
    //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    dim3 dimGrid(NUM_OF_BLOCKS,1);
    dim3 dimBlock(THREADS_PER_BLOCK,1);
    CUT_SAFE_CALL(cutCreateTimer(&my_timer));
    // Begin external power data acquisition before the timed launch.
    TaskHandle taskhandle = LaunchDAQ();
    CUT_SAFE_CALL(cutStartTimer(my_timer));
    PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
    // Synchronize so the timer measures the full kernel execution.
    CUDA_SAFE_CALL( cudaThreadSynchronize() );
    printf("execution time = %f\n", cutGetTimerValue(my_timer));
    TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
    CUT_SAFE_CALL(cutStopTimer(my_timer));
    CUT_SAFE_CALL(cutDeleteTimer(my_timer));
    getLastCudaError("kernel launch failure");
    #ifdef _DEBUG
    checkCudaErrors( cudaDeviceSynchronize() );
    #endif
    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
    CleanupResources();
    return 0;
}
// Releases every buffer the benchmark may have allocated.  Safe to call
// at any stage: pointers that were never allocated are still null and
// are skipped.  (The *_B buffers are never allocated in this build.)
void CleanupResources(void){
    // Device-side buffers.
    if (d_A) {
        cudaFree(d_A);
    }
    if (d_C) {
        cudaFree(d_C);
    }
    // Host-side buffers.
    if (h_A) {
        free(h_A);
    }
    if (h_C) {
        free(h_C);
    }
}
// Fills `data` with pseudo-random entries in [0, 255].
// Fix: the original wrote (int)(rand() / RAND_MAX) — *integer* division,
// which is 0 for every draw except rand() == RAND_MAX, so the benchmark
// touched all-zero data despite the "random entries" intent.  Masking
// the low byte yields genuinely varied values.
void RandomInit(int* data, int n){
    for (int i = 0; i < n; ++i)
        data[i] = rand() & 255;
}
|
018701dbbcf4e9f4d29ad8941a7257c0bd2f5608.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include <cudaDefs.h>
// #include <rocblas.h>
#include "../../my_cuda.cu"
#include <rocblas.h>
#include <rocblas.h>
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
hipblasStatus_t status = hipblasStatus_t();
hipblasHandle_t handle = hipblasHandle_t();
const unsigned int N = 5;
const unsigned int dim = 3;
const unsigned int MEMSIZE = N * dim * sizeof(float);
const unsigned int THREAD_PER_BLOCK = 128;
const unsigned int GRID_SIZE = (N * dim + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
// Writes `length` points of `dim` components into `data`: every
// component of point i carries the value i % 256 (low byte of i).
void fillData(float *data, const unsigned int length, const unsigned int dim)
{
    unsigned int out = 0;
    for (unsigned int point = 0; point < length; point++)
    {
        const unsigned int value = point & 255; // same as point % 256
        for (unsigned int component = 0; component < dim; component++)
        {
            data[out++] = value;
        }
    }
}
// Fills all length * dim entries of `data` with the constant `number`.
void fillDataWithNumber(float *data, const unsigned int length, const unsigned int dim, const float number)
{
    const unsigned int total = length * dim;
    for (unsigned int idx = 0; idx < total; idx++)
    {
        data[idx] = number;
    }
}
// TODO stub: intended to square the `length` elements of `a` and `b`
// element-wise into `a2` and `b2`; currently performs no work.
__global__ void kernelPowerTwo(const float *a, const float *b, const unsigned int length, float *a2, float *b2)
{
    //TODO: guarded elementwise a2[i] = a[i]*a[i]; b2[i] = b[i]*b[i];
}
// Exercise scaffold for a pairwise squared-distance computation via
// BLAS: allocates pinned host buffers and device buffers, uploads the
// inputs, launches the (stub) squaring kernel, and tears everything
// down.  The actual GEMM steps (TODO 2/3) are not implemented here.
int main(int argc, char *argv[])
{
    //initializeCUDA(deviceProp);
    status = hipblasCreate(&handle);
    float alpha, beta;   // NOTE(review): unused until TODO 2/3 are filled in
    float *a, *b, *m;
    float *da, *da2, *db, *db2, *dm;
    float *ones, *dones;
    // paged-locked allocation
    hipHostMalloc((void **)&a, MEMSIZE, hipHostMallocDefault);
    hipHostMalloc((void **)&b, MEMSIZE, hipHostMallocDefault);
    hipHostMalloc((void **)&ones, MEMSIZE, hipHostMallocDefault);
    hipHostMalloc((void **)&m, N * N * sizeof(float), hipHostMallocDefault);
    hipMalloc((void **)&da, MEMSIZE);
    hipMalloc((void **)&da2, MEMSIZE);
    hipMalloc((void **)&db, MEMSIZE);
    hipMalloc((void **)&db2, MEMSIZE);
    hipMalloc((void **)&dones, MEMSIZE);
    hipMalloc((void **)&dm, N * N * sizeof(float));
    fillData(a, N, dim);
    fillData(b, N, dim);
    fillDataWithNumber(ones, N, dim, 1.0f);
    //Copy data to DEVICE
    hipMemcpy(da, a, MEMSIZE, hipMemcpyHostToDevice);
    hipMemcpy(db, b, MEMSIZE, hipMemcpyHostToDevice);
    hipMemcpy(dones, ones, MEMSIZE, hipMemcpyHostToDevice);
    //TODO 1: Process a -> a^2 and b->b^2
    hipLaunchKernelGGL(( kernelPowerTwo), dim3(GRID_SIZE), dim3(THREAD_PER_BLOCK), 0, 0, da, db, N * dim, da2, db2);
    //TODO 2: Process a^2 + b^2 using CUBLAS //pair-wise operation such that the result is dm[N*N] matrix
    //TODO 3: Process -2ab and sum with previous result stored in dm using CUBLAS
    // checkDeviceMatrix<float>(da, sizeof(float) * dim, N, dim, "%f ", "A");
    // checkDeviceMatrix<float>(da2, sizeof(float) * dim, N, dim, "%f ", "A^2");
    // checkDeviceMatrix<float>(db, sizeof(float) * dim, N, dim, "%f ", "B");
    // checkDeviceMatrix<float>(db2, sizeof(float) * dim, N, dim, "%f ", "B^2");
    // checkDeviceMatrix<float>(dones, sizeof(float) * dim, N, dim, "%f ", "ONES");
    // checkDeviceMatrix<float>(dm, sizeof(float) * N, N, N, "%f ", "M");
    hipFree(da);
    hipFree(da2);
    hipFree(db);
    hipFree(db2);
    hipFree(dm);
    hipFree(dones);
    hipHostFree(a);
    hipHostFree(b);
    hipHostFree(m);
    hipHostFree(ones);
    status = hipblasDestroy(handle);
}
| 018701dbbcf4e9f4d29ad8941a7257c0bd2f5608.cu | // #include <cudaDefs.h>
// #include <cublas_v2.h>
#include "../../my_cuda.cu"
#include <cublas.h>
#include <cublas_v2.h>
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
cublasStatus_t status = cublasStatus_t();
cublasHandle_t handle = cublasHandle_t();
const unsigned int N = 5;
const unsigned int dim = 3;
const unsigned int MEMSIZE = N * dim * sizeof(float);
const unsigned int THREAD_PER_BLOCK = 128;
const unsigned int GRID_SIZE = (N * dim + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
// Writes `length` points of `dim` components into `data`: every
// component of point i carries the value i % 256 (low byte of i).
void fillData(float *data, const unsigned int length, const unsigned int dim)
{
    unsigned int out = 0;
    for (unsigned int point = 0; point < length; point++)
    {
        const unsigned int value = point & 255; // same as point % 256
        for (unsigned int component = 0; component < dim; component++)
        {
            data[out++] = value;
        }
    }
}
// Fills all length * dim entries of `data` with the constant `number`.
void fillDataWithNumber(float *data, const unsigned int length, const unsigned int dim, const float number)
{
    const unsigned int total = length * dim;
    for (unsigned int idx = 0; idx < total; idx++)
    {
        data[idx] = number;
    }
}
// TODO stub: intended to square the `length` elements of `a` and `b`
// element-wise into `a2` and `b2`; currently performs no work.
__global__ void kernelPowerTwo(const float *a, const float *b, const unsigned int length, float *a2, float *b2)
{
    //TODO: guarded elementwise a2[i] = a[i]*a[i]; b2[i] = b[i]*b[i];
}
// Exercise scaffold for a pairwise squared-distance computation via
// cuBLAS: allocates pinned host buffers and device buffers, uploads the
// inputs, launches the (stub) squaring kernel, and tears everything
// down.  The actual GEMM steps (TODO 2/3) are not implemented here.
int main(int argc, char *argv[])
{
    //initializeCUDA(deviceProp);
    status = cublasCreate(&handle);
    float alpha, beta;   // NOTE(review): unused until TODO 2/3 are filled in
    float *a, *b, *m;
    float *da, *da2, *db, *db2, *dm;
    float *ones, *dones;
    // paged-locked allocation
    cudaHostAlloc((void **)&a, MEMSIZE, cudaHostAllocDefault);
    cudaHostAlloc((void **)&b, MEMSIZE, cudaHostAllocDefault);
    cudaHostAlloc((void **)&ones, MEMSIZE, cudaHostAllocDefault);
    cudaHostAlloc((void **)&m, N * N * sizeof(float), cudaHostAllocDefault);
    cudaMalloc((void **)&da, MEMSIZE);
    cudaMalloc((void **)&da2, MEMSIZE);
    cudaMalloc((void **)&db, MEMSIZE);
    cudaMalloc((void **)&db2, MEMSIZE);
    cudaMalloc((void **)&dones, MEMSIZE);
    cudaMalloc((void **)&dm, N * N * sizeof(float));
    fillData(a, N, dim);
    fillData(b, N, dim);
    fillDataWithNumber(ones, N, dim, 1.0f);
    //Copy data to DEVICE
    cudaMemcpy(da, a, MEMSIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, MEMSIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(dones, ones, MEMSIZE, cudaMemcpyHostToDevice);
    //TODO 1: Process a -> a^2 and b->b^2
    kernelPowerTwo<<<GRID_SIZE, THREAD_PER_BLOCK>>>(da, db, N * dim, da2, db2);
    //TODO 2: Process a^2 + b^2 using CUBLAS //pair-wise operation such that the result is dm[N*N] matrix
    //TODO 3: Process -2ab and sum with previous result stored in dm using CUBLAS
    // checkDeviceMatrix<float>(da, sizeof(float) * dim, N, dim, "%f ", "A");
    // checkDeviceMatrix<float>(da2, sizeof(float) * dim, N, dim, "%f ", "A^2");
    // checkDeviceMatrix<float>(db, sizeof(float) * dim, N, dim, "%f ", "B");
    // checkDeviceMatrix<float>(db2, sizeof(float) * dim, N, dim, "%f ", "B^2");
    // checkDeviceMatrix<float>(dones, sizeof(float) * dim, N, dim, "%f ", "ONES");
    // checkDeviceMatrix<float>(dm, sizeof(float) * N, N, N, "%f ", "M");
    cudaFree(da);
    cudaFree(da2);
    cudaFree(db);
    cudaFree(db2);
    cudaFree(dm);
    cudaFree(dones);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(m);
    cudaFreeHost(ones);
    status = cublasDestroy(handle);
}
|
6551c282622d1ff175116b8afaa4d74f36e6b31b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "sequential/render.h"
#include "cuda/render.h"
int main(int argc, char* argv[]){
set_memory();
set_memory_cuda();
hipDeviceSynchronize();
// render_sequential_exhaustive(argc, argv);
// render_sequential_barneshut(argc, argv);
// render_cuda_exhaustive(argc, argv);
render_cuda_barneshut(argc, argv);
hipDeviceSynchronize();
free_memory();
return 0;
} | 6551c282622d1ff175116b8afaa4d74f36e6b31b.cu | #include <stdio.h>
#include <cuda.h>
#include "sequential/render.h"
#include "cuda/render.h"
int main(int argc, char* argv[]){
set_memory();
set_memory_cuda();
cudaDeviceSynchronize();
// render_sequential_exhaustive(argc, argv);
// render_sequential_barneshut(argc, argv);
// render_cuda_exhaustive(argc, argv);
render_cuda_barneshut(argc, argv);
cudaDeviceSynchronize();
free_memory();
return 0;
} |
421a3c05ccea73a6c4e0258956728b241076113e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "include/kernels/nbodyfft.h"
// Rearranges the term-major W coefficients into the zero-padded FFT
// input grid: coefficient (term, i, j) of the n_fft_coeffs_half square
// lands in the upper-left quadrant of the term's n_fft_coeffs square.
// One thread per (term, cell).
__global__ void copy_to_fft_input(volatile float *__restrict__ fft_input,
                                  const float *w_coefficients_device,
                                  const int n_fft_coeffs,
                                  const int n_fft_coeffs_half,
                                  const int n_terms)
{
    register int i, j;
    register int TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half)
        return;
    register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
    register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half);
    i = current_loc / n_fft_coeffs_half;
    j = current_loc % n_fft_coeffs_half;
    fft_input[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] = w_coefficients_device[current_term + current_loc * n_terms];
}
// Inverse of copy_to_fft_input: reads the lower-right quadrant of each
// term's inverse-FFT output (offset by n_fft_coeffs_half in both axes),
// normalizes by 1/(n_fft_coeffs^2) — cuFFT inverse transforms are
// unnormalized — and stores back in term-major layout.
__global__ void copy_from_fft_output(volatile float *__restrict__ y_tilde_values,
                                     const float *fft_output,
                                     const int n_fft_coeffs,
                                     const int n_fft_coeffs_half,
                                     const int n_terms)
{
    register int i, j;
    register int TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half)
        return;
    register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
    register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half);
    i = current_loc / n_fft_coeffs_half + n_fft_coeffs_half;
    j = current_loc % n_fft_coeffs_half + n_fft_coeffs_half;
    y_tilde_values[current_term + n_terms * current_loc] = fft_output[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] / (float)(n_fft_coeffs * n_fft_coeffs);
}
// Assigns each point to its interpolation-grid box (clamped to the grid
// in both axes) and records the point's coordinates relative to that
// box's lower bounds, normalized by the box width.  One thread per point.
__global__ void compute_point_box_idx(volatile int *__restrict__ point_box_idx,
                                      volatile float *__restrict__ x_in_box,
                                      volatile float *__restrict__ y_in_box,
                                      const float *const xs,
                                      const float *const ys,
                                      const float *const box_lower_bounds,
                                      const float coord_min,
                                      const float box_width,
                                      const int n_boxes,
                                      const int n_total_boxes,
                                      const int N)
{
    register int TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= N)
        return;
    register int x_idx = (int)((xs[TID] - coord_min) / box_width);
    register int y_idx = (int)((ys[TID] - coord_min) / box_width);
    // Clamp outliers into the grid rather than dropping them.
    x_idx = max(0, x_idx);
    x_idx = min(n_boxes - 1, x_idx);
    y_idx = max(0, y_idx);
    y_idx = min(n_boxes - 1, y_idx);
    register int box_idx = y_idx * n_boxes + x_idx;
    point_box_idx[TID] = box_idx;
    // y lower bounds are stored n_total_boxes entries after the x bounds.
    x_in_box[TID] = (xs[TID] - box_lower_bounds[box_idx]) / box_width;
    y_in_box[TID] = (ys[TID] - box_lower_bounds[n_total_boxes + box_idx]) / box_width;
}
// Evaluates the j-th Lagrange interpolation basis at each point's
// normalized in-box coordinate: product over k != j of
// (y - node_k), divided by the precomputed basis denominator for node j.
// Thread layout: TID = j * N + i (point i, node j).
__global__ void interpolate_device(
    volatile float *__restrict__ interpolated_values,
    const float *const y_in_box,
    const float *const y_tilde_spacings,
    const float *const denominator,
    const int n_interpolation_points,
    const int N)
{
    register int TID, i, j;
    register float value, ybox_i;
    TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= N * n_interpolation_points)
        return;
    i = TID % N;
    j = TID / N;
    value = 1;
    ybox_i = y_in_box[i];
    for (k = 0; k < n_interpolation_points; k++)
    {
        if (j != k)
        {
            value *= ybox_i - y_tilde_spacings[k];
        }
    }
    interpolated_values[j * N + i] = value / denominator[j];
}
// Scatter step: accumulates each point's charge, weighted by its x/y
// interpolation coefficients, onto the grid node
// (box_i, interp_i) x (box_j, interp_j) of the W-coefficient array.
// atomicAdd is required because many points share grid nodes.
__global__ void compute_interpolated_indices(
    float *__restrict__ w_coefficients_device,
    const int *const point_box_indices,
    const float *const chargesQij,
    const float *const x_interpolated_values,
    const float *const y_interpolated_values,
    const int N,
    const int n_interpolation_points,
    const int n_boxes,
    const int n_terms)
{
    register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
    TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
        return;
    // Decode (interp_i, interp_j, point i, term) from the flat TID.
    current_term = TID % n_terms;
    i = (TID / n_terms) % N;
    interp_j = ((TID / n_terms) / N) % n_interpolation_points;
    interp_i = ((TID / n_terms) / N) / n_interpolation_points;
    box_idx = point_box_indices[i];
    box_i = box_idx % n_boxes;
    box_j = box_idx / n_boxes;
    // interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term];
    // Flat index of the target grid node within the global node lattice.
    idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
          (box_j * n_interpolation_points) + interp_j;
    // interpolated_indices[TID] = idx * n_terms + current_term;
    atomicAdd(
        w_coefficients_device + idx * n_terms + current_term,
        x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term]);
}
// Gather step (mirror of compute_interpolated_indices): accumulates the
// convolved grid values back onto each point's potential, weighted by
// the same interpolation coefficients.  atomicAdd because every
// (interp_i, interp_j) pair contributes to the same per-point slot.
__global__ void compute_potential_indices(
    float *__restrict__ potentialsQij,
    const int *const point_box_indices,
    const float *const y_tilde_values,
    const float *const x_interpolated_values,
    const float *const y_interpolated_values,
    const int N,
    const int n_interpolation_points,
    const int n_boxes,
    const int n_terms)
{
    register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
    TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
        return;
    current_term = TID % n_terms;
    i = (TID / n_terms) % N;
    interp_j = ((TID / n_terms) / N) % n_interpolation_points;
    interp_i = ((TID / n_terms) / N) / n_interpolation_points;
    box_idx = point_box_indices[i];
    box_i = box_idx % n_boxes;
    box_j = box_idx / n_boxes;
    idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
          (box_j * n_interpolation_points) + interp_j;
    // interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term];
    // interpolated_indices[TID] = i * n_terms + current_term;
    atomicAdd(
        potentialsQij + i * n_terms + current_term,
        x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term]);
}
// Squared 2-D Cauchy kernel K(p, q) = (1 + ||p - q||^2)^-2, the t-SNE
// similarity kernel used to build the FFT generating matrix.
// Fix: the original called pow(), which promotes the whole expression to
// double on the device — needlessly slow in a float function, and both
// exponents are known small integers.  Two float multiplies and one
// reciprocal compute the same value entirely in single precision.
__host__ __device__ float squared_cauchy_2d(float x1, float x2, float y1, float y2)
{
    float dx = x1 - y1;
    float dy = x2 - y2;
    float base = 1.0f + dx * dx + dy * dy;
    return 1.0f / (base * base);
}
// Evaluates the squared-Cauchy kernel at every node offset (i*h, j*h)
// from the grid origin and mirrors the value into all four quadrants of
// the 2x-sized kernel_tilde array — the circulant embedding of the
// (symmetric, translation-invariant) kernel matrix that is FFT'd once
// in PrecomputeFFT2D.  One thread per (i, j) offset pair.
__global__ void compute_kernel_tilde(
    volatile float *__restrict__ kernel_tilde,
    const float x_min,
    const float y_min,
    const float h,
    const int n_interpolation_points_1d,
    const int n_fft_coeffs)
{
    register int TID, i, j;
    register float tmp;
    TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_interpolation_points_1d * n_interpolation_points_1d)
        return;
    i = TID / n_interpolation_points_1d;
    j = TID % n_interpolation_points_1d;
    // TODO: Possibly issuing a memory pre-fetch here could help the code.
    // Kernel value for an offset of (i, j) grid steps from the first node.
    tmp = squared_cauchy_2d(y_min + h / 2, x_min + h / 2, y_min + h / 2 + i * h, x_min + h / 2 + j * h);
    // Mirror into all four quadrants (rows i == 0 / j == 0 write the
    // same cell twice, which is harmless since the value is identical).
    kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
    kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
    kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
    kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
}
// One thread per box: writes the box's x extent into the first
// n_total_boxes entries of the bounds arrays and its y extent into the
// following n_total_boxes entries.
__global__ void compute_upper_and_lower_bounds(
    volatile float *__restrict__ box_upper_bounds,
    volatile float *__restrict__ box_lower_bounds,
    const float box_width,
    const float x_min,
    const float y_min,
    const int n_boxes,
    const int n_total_boxes)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n_boxes * n_boxes)
        return;

    const int row = tid / n_boxes;
    const int col = tid % n_boxes;
    const int flat = row * n_boxes + col;

    // x extent of this box's column.
    box_lower_bounds[flat] = col * box_width + x_min;
    box_upper_bounds[flat] = (col + 1) * box_width + x_min;
    // y extent of this box's row, stored after all x entries.
    box_lower_bounds[n_total_boxes + flat] = row * box_width + y_min;
    box_upper_bounds[n_total_boxes + flat] = (row + 1) * box_width + y_min;
}
// Scatter copy: writes each sparse output value into its target slot of
// the dense W-coefficient array.  One thread per element.
__global__ void copy_to_w_coefficients(
    volatile float *__restrict__ w_coefficients_device,
    const int *const output_indices,
    const float *const output_values,
    const int num_elements)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < num_elements)
    {
        w_coefficients_device[output_indices[tid]] = output_values[tid];
    }
}
// One-time setup for the FFT-accelerated N-body step: computes the
// per-box coordinate bounds for the current embedding extent, evaluates
// the circulant embedding of the squared-Cauchy kernel at the
// interpolation nodes, and runs its real-to-complex FFT once so the
// transform can be reused by every NbodyFFT2D call this iteration.
void tsnecuda::PrecomputeFFT2D(
    hipfftHandle &plan_kernel_tilde,
    float x_max,
    float x_min,
    float y_max,
    float y_min,
    int n_boxes,
    int n_interpolation_points,
    thrust::device_vector<float> &box_lower_bounds_device,
    thrust::device_vector<float> &box_upper_bounds_device,
    thrust::device_vector<float> &kernel_tilde_device,
    thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device)
{
    const int num_threads = 32;
    int num_blocks = (n_boxes * n_boxes + num_threads - 1) / num_threads;
    /*
     * Set up the boxes
     */
    int n_total_boxes = n_boxes * n_boxes;
    // NOTE(review): box width is derived from the x extent only; the
    // grid is square, so this presumably assumes x and y share a range.
    float box_width = (x_max - x_min) / (float)n_boxes;
    // Left and right bounds of each box, first the lower bounds in the x direction, then in the y direction
    hipLaunchKernelGGL(( compute_upper_and_lower_bounds), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(box_upper_bounds_device.data()),
        thrust::raw_pointer_cast(box_lower_bounds_device.data()),
        box_width, x_min, y_min, n_boxes, n_total_boxes);
    // Coordinates of all the equispaced interpolation points
    int n_interpolation_points_1d = n_interpolation_points * n_boxes;
    int n_fft_coeffs = 2 * n_interpolation_points_1d;
    float h = box_width / (float)n_interpolation_points;
    /*
     * Evaluate the kernel at the interpolation nodes and form the embedded generating kernel vector for a circulant
     * matrix
     */
    // thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs);
    num_blocks = (n_interpolation_points_1d * n_interpolation_points_1d + num_threads - 1) / num_threads;
    hipLaunchKernelGGL(( compute_kernel_tilde), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(kernel_tilde_device.data()),
        x_min, y_min, h, n_interpolation_points_1d, n_fft_coeffs);
    GpuErrorCheck(hipDeviceSynchronize());
    // Precompute the FFT of the kernel generating matrix
    // NOTE(review): the FFT status is not checked, unlike the kernel above.
    hipfftExecR2C(plan_kernel_tilde,
                  reinterpret_cast<hipfftReal *>(thrust::raw_pointer_cast(kernel_tilde_device.data())),
                  reinterpret_cast<hipfftComplex *>(thrust::raw_pointer_cast(fft_kernel_tilde_device.data())));
}
// One FFT-accelerated 2D N-body evaluation (FIt-SNE-style interpolation scheme):
//   Step 0: locate each point's box and its relative in-box coordinates;
//   Step 1: evaluate Lagrange basis polynomials per point and atomically
//           scatter charges onto the equispaced grid (w coefficients);
//   Step 2: circular convolution with the precomputed kernel — forward R2C
//           FFT, broadcast Hadamard product with fft_kernel_tilde_device,
//           inverse C2R FFT;
//   Step 3: atomically gather grid potentials back onto the points.
// NOTE(review): several parameters (total_interpolation_points,
// all_interpolated_values_device, output_values, all_interpolated_indices,
// output_indices) are never read here — presumably kept for interface
// compatibility; confirm before removing. The hipfftExec* return codes are
// not checked, and w_coefficients_device / potentialsQij_device are assumed
// zeroed by the caller since the kernels only atomicAdd into them — TODO confirm.
void tsnecuda::NbodyFFT2D(
    hipfftHandle &plan_dft,
    hipfftHandle &plan_idft,
    int N,
    int n_terms,
    int n_boxes,
    int n_interpolation_points,
    thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device,
    int n_total_boxes,
    int total_interpolation_points,
    float coord_min,
    float box_width,
    int n_fft_coeffs_half,
    int n_fft_coeffs,
    thrust::device_vector<float> &fft_input,
    thrust::device_vector<thrust::complex<float>> &fft_w_coefficients,
    thrust::device_vector<float> &fft_output,
    thrust::device_vector<int> &point_box_idx_device,
    thrust::device_vector<float> &x_in_box_device,
    thrust::device_vector<float> &y_in_box_device,
    thrust::device_vector<float> &points_device,
    thrust::device_vector<float> &box_lower_bounds_device,
    thrust::device_vector<float> &y_tilde_spacings_device,
    thrust::device_vector<float> &denominator_device,
    thrust::device_vector<float> &y_tilde_values,
    thrust::device_vector<float> &all_interpolated_values_device,
    thrust::device_vector<float> &output_values,
    thrust::device_vector<int> &all_interpolated_indices,
    thrust::device_vector<int> &output_indices,
    thrust::device_vector<float> &w_coefficients_device,
    thrust::device_vector<float> &chargesQij_device,
    thrust::device_vector<float> &x_interpolated_values_device,
    thrust::device_vector<float> &y_interpolated_values_device,
    thrust::device_vector<float> &potentialsQij_device)
{
    // std::cout << "start" << std::endl;
    const int num_threads = 128;
    int num_blocks = (N + num_threads - 1) / num_threads;
    // Compute box indices and the relative position of each point in its box in the interval [0, 1]
    hipLaunchKernelGGL(( compute_point_box_idx), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(point_box_idx_device.data()),
        thrust::raw_pointer_cast(x_in_box_device.data()),
        thrust::raw_pointer_cast(y_in_box_device.data()),
        thrust::raw_pointer_cast(points_device.data()),
        thrust::raw_pointer_cast(points_device.data() + N),  // y coordinates start at offset N
        thrust::raw_pointer_cast(box_lower_bounds_device.data()),
        coord_min,
        box_width,
        n_boxes,
        n_total_boxes,
        N);
    GpuErrorCheck(hipDeviceSynchronize());
    /*
     * Step 1: Interpolate kernel using Lagrange polynomials and compute the w coefficients
     */
    // TODO: We can stream-parallelize these two interpolation functions
    // Compute the interpolated values at each real point with each Lagrange polynomial in the `x` direction
    num_blocks = (N * n_interpolation_points + num_threads - 1) / num_threads;
    hipLaunchKernelGGL(( interpolate_device), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(x_interpolated_values_device.data()),
        thrust::raw_pointer_cast(x_in_box_device.data()),
        thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
        thrust::raw_pointer_cast(denominator_device.data()),
        n_interpolation_points,
        N);
    GpuErrorCheck(hipDeviceSynchronize()); // TODO: Remove the synchronization here
    // Compute the interpolated values at each real point with each Lagrange polynomial in the `y` direction
    hipLaunchKernelGGL(( interpolate_device), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(y_interpolated_values_device.data()),
        thrust::raw_pointer_cast(y_in_box_device.data()),
        thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
        thrust::raw_pointer_cast(denominator_device.data()),
        n_interpolation_points,
        N);
    GpuErrorCheck(hipDeviceSynchronize());
    //TODO: Synchronization required here
    // TODO: This section has an atomic-add, can we remove it?
    num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
    hipLaunchKernelGGL(( compute_interpolated_indices), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(w_coefficients_device.data()),
        thrust::raw_pointer_cast(point_box_idx_device.data()),
        thrust::raw_pointer_cast(chargesQij_device.data()),
        thrust::raw_pointer_cast(x_interpolated_values_device.data()),
        thrust::raw_pointer_cast(y_interpolated_values_device.data()),
        N,
        n_interpolation_points,
        n_boxes,
        n_terms);
    GpuErrorCheck(hipDeviceSynchronize());
    /*
     * Step 2: Compute the values v_{m, n} at the equispaced nodes, multiply the kernel matrix with the coefficients w
     */
    num_blocks = ((n_terms * n_fft_coeffs_half * n_fft_coeffs_half) + num_threads - 1) / num_threads;
    // Embed the w coefficients in the zero-padded FFT input plane per term.
    hipLaunchKernelGGL(( copy_to_fft_input), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(fft_input.data()),
        thrust::raw_pointer_cast(w_coefficients_device.data()),
        n_fft_coeffs,
        n_fft_coeffs_half,
        n_terms);
    GpuErrorCheck(hipDeviceSynchronize());
    // Compute fft values at interpolated nodes
    hipfftExecR2C(plan_dft,
                  reinterpret_cast<hipfftReal *>(thrust::raw_pointer_cast(fft_input.data())),
                  reinterpret_cast<hipfftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())));
    GpuErrorCheck(hipDeviceSynchronize());
    // Take the broadcasted Hadamard product of a complex matrix and a complex vector
    // TODO: Check timing on this kernel
    tsnecuda::util::BroadcastMatrixVector(
        fft_w_coefficients, fft_kernel_tilde_device, n_fft_coeffs * (n_fft_coeffs / 2 + 1), n_terms,
        thrust::multiplies<thrust::complex<float>>(), 0, thrust::complex<float>(1.0));
    // Invert the computed values at the interpolated nodes
    hipfftExecC2R(plan_idft,
                  reinterpret_cast<hipfftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())),
                  reinterpret_cast<hipfftReal *>(thrust::raw_pointer_cast(fft_output.data())));
    GpuErrorCheck(hipDeviceSynchronize());
    // Extract (and normalize) the lower-right quadrant of each term's plane.
    hipLaunchKernelGGL(( copy_from_fft_output), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(y_tilde_values.data()),
        thrust::raw_pointer_cast(fft_output.data()),
        n_fft_coeffs,
        n_fft_coeffs_half,
        n_terms);
    GpuErrorCheck(hipDeviceSynchronize());
    /*
     * Step 3: Compute the potentials \tilde{\phi}
     */
    // TODO: Depending on the profiling here, we should check to see if we can split this code
    num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
    hipLaunchKernelGGL(( compute_potential_indices), dim3(num_blocks), dim3(num_threads), 0, 0,
        thrust::raw_pointer_cast(potentialsQij_device.data()),
        thrust::raw_pointer_cast(point_box_idx_device.data()),
        thrust::raw_pointer_cast(y_tilde_values.data()),
        thrust::raw_pointer_cast(x_interpolated_values_device.data()),
        thrust::raw_pointer_cast(y_interpolated_values_device.data()),
        N,
        n_interpolation_points,
        n_boxes,
        n_terms);
    GpuErrorCheck(hipDeviceSynchronize());
}
| 421a3c05ccea73a6c4e0258956728b241076113e.cu | #include "include/kernels/nbodyfft.h"
// Embeds each term's n_fft_coeffs_half x n_fft_coeffs_half coefficient grid
// into the top-left quadrant of its zero-padded n_fft_coeffs x n_fft_coeffs
// FFT input plane. The source w coefficients are stored term-minor
// (coefficient index varies fastest across terms); the destination is
// row-major per term.
__global__ void copy_to_fft_input(volatile float *__restrict__ fft_input,
                                  const float *w_coefficients_device,
                                  const int n_fft_coeffs,
                                  const int n_fft_coeffs_half,
                                  const int n_terms)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int half_sq = n_fft_coeffs_half * n_fft_coeffs_half;
    if (tid >= n_terms * half_sq)
        return;
    const int term = tid / half_sq;
    const int loc = tid % half_sq;
    const int row = loc / n_fft_coeffs_half;
    const int col = loc % n_fft_coeffs_half;
    fft_input[term * (n_fft_coeffs * n_fft_coeffs) + row * n_fft_coeffs + col] =
        w_coefficients_device[term + loc * n_terms];
}
// Extracts, for each term, the lower-right n_fft_coeffs_half x
// n_fft_coeffs_half quadrant of the inverse-FFT output and stores it
// term-minor in y_tilde_values, dividing by the transform size
// (inverse FFTs here are unnormalized).
__global__ void copy_from_fft_output(volatile float *__restrict__ y_tilde_values,
                                     const float *fft_output,
                                     const int n_fft_coeffs,
                                     const int n_fft_coeffs_half,
                                     const int n_terms)
{
    register int i, j;
    register int TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half)
        return;
    register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
    register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half);
    // Offset both coordinates by n_fft_coeffs_half to land in the
    // lower-right quadrant of the padded plane.
    i = current_loc / n_fft_coeffs_half + n_fft_coeffs_half;
    j = current_loc % n_fft_coeffs_half + n_fft_coeffs_half;
    // Normalize by n_fft_coeffs^2 (forward+inverse FFT scaling).
    y_tilde_values[current_term + n_terms * current_loc] = fft_output[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] / (float)(n_fft_coeffs * n_fft_coeffs);
}
// For each point, computes the index of the grid box containing it (clamped
// to the grid) and the point's relative position inside that box in [0, 1]
// along each axis. box_lower_bounds stores all x lower bounds first, then,
// offset by n_total_boxes, all y lower bounds.
__global__ void compute_point_box_idx(volatile int *__restrict__ point_box_idx,
                                      volatile float *__restrict__ x_in_box,
                                      volatile float *__restrict__ y_in_box,
                                      const float *const xs,
                                      const float *const ys,
                                      const float *const box_lower_bounds,
                                      const float coord_min,
                                      const float box_width,
                                      const int n_boxes,
                                      const int n_total_boxes,
                                      const int N)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;
    // Column/row of the containing box, clamped to [0, n_boxes - 1].
    int col = min(n_boxes - 1, max(0, (int)((xs[tid] - coord_min) / box_width)));
    int row = min(n_boxes - 1, max(0, (int)((ys[tid] - coord_min) / box_width)));
    const int box = row * n_boxes + col;
    point_box_idx[tid] = box;
    // Fractional position of the point within its box.
    x_in_box[tid] = (xs[tid] - box_lower_bounds[box]) / box_width;
    y_in_box[tid] = (ys[tid] - box_lower_bounds[n_total_boxes + box]) / box_width;
}
// For each point i and each Lagrange basis polynomial j, evaluates the
// basis at the point's in-box coordinate: the product over k != j of
// (y_i - t_k), divided by the precomputed denominator of basis j.
// Output layout: interpolated_values[j * N + i].
__global__ void interpolate_device(
    volatile float *__restrict__ interpolated_values,
    const float *const y_in_box,
    const float *const y_tilde_spacings,
    const float *const denominator,
    const int n_interpolation_points,
    const int N)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= N * n_interpolation_points)
        return;
    const int point = tid % N;   // which point
    const int poly = tid / N;    // which basis polynomial
    const float y = y_in_box[point];
    float numerator = 1;
    for (int k = 0; k < n_interpolation_points; k++)
    {
        if (k != poly)
        {
            numerator *= y - y_tilde_spacings[k];
        }
    }
    interpolated_values[poly * N + point] = numerator / denominator[poly];
}
// Scatters point charges onto the interpolation grid: one thread per
// (term, point, interp_i, interp_j) tuple accumulates
// x_lagrange * y_lagrange * charge into the w coefficient of the grid node
// the tuple maps to. atomicAdd is required because many points contribute
// to the same node; w_coefficients_device is presumably zeroed by the
// caller — TODO confirm.
__global__ void compute_interpolated_indices(
    float *__restrict__ w_coefficients_device,
    const int *const point_box_indices,
    const float *const chargesQij,
    const float *const x_interpolated_values,
    const float *const y_interpolated_values,
    const int N,
    const int n_interpolation_points,
    const int n_boxes,
    const int n_terms)
{
    register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
    TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
        return;
    // Decompose the flat thread index: term fastest, then point, then the
    // two interpolation-node offsets.
    current_term = TID % n_terms;
    i = (TID / n_terms) % N;
    interp_j = ((TID / n_terms) / N) % n_interpolation_points;
    interp_i = ((TID / n_terms) / N) / n_interpolation_points;
    box_idx = point_box_indices[i];
    box_i = box_idx % n_boxes;
    box_j = box_idx / n_boxes;
    // interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term];
    // Flat index of the target interpolation node on the global grid.
    idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
          (box_j * n_interpolation_points) + interp_j;
    // interpolated_indices[TID] = idx * n_terms + current_term;
    atomicAdd(
        w_coefficients_device + idx * n_terms + current_term,
        x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term]);
}
// Gather counterpart of compute_interpolated_indices: one thread per
// (term, point, interp_i, interp_j) tuple accumulates
// x_lagrange * y_lagrange * grid_value into the point's potential.
// atomicAdd is required because n_interpolation_points^2 threads write to
// each (point, term) slot; potentialsQij is presumably zeroed by the
// caller — TODO confirm.
__global__ void compute_potential_indices(
    float *__restrict__ potentialsQij,
    const int *const point_box_indices,
    const float *const y_tilde_values,
    const float *const x_interpolated_values,
    const float *const y_interpolated_values,
    const int N,
    const int n_interpolation_points,
    const int n_boxes,
    const int n_terms)
{
    register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
    TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
        return;
    // Same flat-index decomposition as compute_interpolated_indices.
    current_term = TID % n_terms;
    i = (TID / n_terms) % N;
    interp_j = ((TID / n_terms) / N) % n_interpolation_points;
    interp_i = ((TID / n_terms) / N) / n_interpolation_points;
    box_idx = point_box_indices[i];
    box_i = box_idx % n_boxes;
    box_j = box_idx / n_boxes;
    // Flat index of the interpolation node being read.
    idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
          (box_j * n_interpolation_points) + interp_j;
    // interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term];
    // interpolated_indices[TID] = i * n_terms + current_term;
    atomicAdd(
        potentialsQij + i * n_terms + current_term,
        x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term]);
}
// Squared 2D Cauchy kernel: (1 + ||(x1,x2) - (y1,y2)||^2)^(-2).
// Fix: the original called pow() with double literals, silently promoting
// every evaluation to double precision on the device. The exponent is a
// fixed integer (-2), so compute it as a reciprocal square in pure float
// arithmetic instead — faster and at least as accurate.
__host__ __device__ float squared_cauchy_2d(float x1, float x2, float y1, float y2)
{
    const float dx = x1 - y1;
    const float dy = x2 - y2;
    const float u = 1.0f + dx * dx + dy * dy;
    return 1.0f / (u * u);
}
// Evaluates the squared-Cauchy kernel at every displacement (i*h, j*h) on the
// interpolation grid and embeds it into the n_fft_coeffs x n_fft_coeffs
// circulant-generating matrix kernel_tilde, mirroring the value into all
// four quadrants around the center (n_interpolation_points_1d,
// n_interpolation_points_1d). When i == 0 or j == 0 the mirrored writes hit
// the same entry, which is harmless since they store the same value.
__global__ void compute_kernel_tilde(
    volatile float *__restrict__ kernel_tilde,
    const float x_min,
    const float y_min,
    const float h,                        // interpolation-node spacing
    const int n_interpolation_points_1d,  // nodes per axis (half the padded side)
    const int n_fft_coeffs)               // padded side length (2x the above)
{
    register int TID, i, j;
    register float tmp;
    TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_interpolation_points_1d * n_interpolation_points_1d)
        return;
    i = TID / n_interpolation_points_1d;
    j = TID % n_interpolation_points_1d;
    // TODO: Possibly issuing a memory pre-fetch here could help the code.
    // Kernel value depends only on the displacement (i*h, j*h); the
    // (x_min + h/2, y_min + h/2) base point cancels in the difference.
    tmp = squared_cauchy_2d(y_min + h / 2, x_min + h / 2, y_min + h / 2 + i * h, x_min + h / 2 + j * h);
    kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
    kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
    kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
    kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
}
// Fills per-box lower/upper coordinate bounds for an n_boxes x n_boxes grid.
// The first n_total_boxes entries of each array hold x-direction bounds;
// the next n_total_boxes hold y-direction bounds. One thread per box.
__global__ void compute_upper_and_lower_bounds(
    volatile float *__restrict__ box_upper_bounds,
    volatile float *__restrict__ box_lower_bounds,
    const float box_width,
    const float x_min,
    const float y_min,
    const int n_boxes,
    const int n_total_boxes)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n_boxes * n_boxes)
        return;
    const int row = tid / n_boxes;
    const int col = tid % n_boxes;
    const int cell = row * n_boxes + col;
    // x-direction bounds of box (row, col).
    box_lower_bounds[cell] = col * box_width + x_min;
    box_upper_bounds[cell] = (col + 1) * box_width + x_min;
    // y-direction bounds, stored after all x bounds.
    box_lower_bounds[n_total_boxes + cell] = row * box_width + y_min;
    box_upper_bounds[n_total_boxes + cell] = (row + 1) * box_width + y_min;
}
// One-thread-per-element scatter: element t of output_values is stored at
// position output_indices[t] of w_coefficients_device. If two indices
// collide, the last writer wins (indices are presumably unique).
__global__ void copy_to_w_coefficients(
    volatile float *__restrict__ w_coefficients_device,
    const int *const output_indices,
    const float *const output_values,
    const int num_elements)
{
    const int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= num_elements)
        return;
    const int destination = output_indices[element];
    w_coefficients_device[destination] = output_values[element];
}
// Precomputes everything that depends only on the embedding's bounding box:
// the interpolation-box bounds and the FFT of the circulant kernel-generating
// matrix later consumed by NbodyFFT2D.
// NOTE(review): y_max is accepted but never read; box_width is derived from
// the x extent only, so the grid presumably assumes a square bounding box —
// confirm with the caller.
void tsnecuda::PrecomputeFFT2D(
    cufftHandle &plan_kernel_tilde,  // plan used for the R2C transform of kernel_tilde
    float x_max,
    float x_min,
    float y_max,  // unused — see NOTE above
    float y_min,
    int n_boxes,                 // boxes per grid side
    int n_interpolation_points,  // interpolation nodes per box per axis
    thrust::device_vector<float> &box_lower_bounds_device,
    thrust::device_vector<float> &box_upper_bounds_device,
    thrust::device_vector<float> &kernel_tilde_device,
    thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device)
{
    const int num_threads = 32;
    int num_blocks = (n_boxes * n_boxes + num_threads - 1) / num_threads;
    /*
     * Set up the boxes
     */
    int n_total_boxes = n_boxes * n_boxes;
    float box_width = (x_max - x_min) / (float)n_boxes;
    // Left and right bounds of each box, first the lower bounds in the x direction, then in the y direction
    compute_upper_and_lower_bounds<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(box_upper_bounds_device.data()),
        thrust::raw_pointer_cast(box_lower_bounds_device.data()),
        box_width, x_min, y_min, n_boxes, n_total_boxes);
    // Coordinates of all the equispaced interpolation points
    int n_interpolation_points_1d = n_interpolation_points * n_boxes;
    int n_fft_coeffs = 2 * n_interpolation_points_1d;  // zero-padded side length
    float h = box_width / (float)n_interpolation_points;  // node spacing
    /*
     * Evaluate the kernel at the interpolation nodes and form the embedded generating kernel vector for a circulant
     * matrix
     */
    // thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs);
    num_blocks = (n_interpolation_points_1d * n_interpolation_points_1d + num_threads - 1) / num_threads;
    compute_kernel_tilde<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(kernel_tilde_device.data()),
        x_min, y_min, h, n_interpolation_points_1d, n_fft_coeffs);
    GpuErrorCheck(cudaDeviceSynchronize());
    // Precompute the FFT of the kernel generating matrix
    // NOTE(review): the cufftExecR2C return status is not checked — consider
    // wrapping it the way the runtime calls are wrapped with GpuErrorCheck.
    cufftExecR2C(plan_kernel_tilde,
                 reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(kernel_tilde_device.data())),
                 reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_kernel_tilde_device.data())));
}
// One FFT-accelerated 2D N-body evaluation (FIt-SNE-style interpolation scheme):
//   Step 0: locate each point's box and its relative in-box coordinates;
//   Step 1: evaluate Lagrange basis polynomials per point and atomically
//           scatter charges onto the equispaced grid (w coefficients);
//   Step 2: circular convolution with the precomputed kernel — forward R2C
//           FFT, broadcast Hadamard product with fft_kernel_tilde_device,
//           inverse C2R FFT;
//   Step 3: atomically gather grid potentials back onto the points.
// NOTE(review): several parameters (total_interpolation_points,
// all_interpolated_values_device, output_values, all_interpolated_indices,
// output_indices) are never read here — presumably kept for interface
// compatibility; confirm before removing. The cufftExec* return codes are
// not checked, and w_coefficients_device / potentialsQij_device are assumed
// zeroed by the caller since the kernels only atomicAdd into them — TODO confirm.
void tsnecuda::NbodyFFT2D(
    cufftHandle &plan_dft,
    cufftHandle &plan_idft,
    int N,
    int n_terms,
    int n_boxes,
    int n_interpolation_points,
    thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device,
    int n_total_boxes,
    int total_interpolation_points,
    float coord_min,
    float box_width,
    int n_fft_coeffs_half,
    int n_fft_coeffs,
    thrust::device_vector<float> &fft_input,
    thrust::device_vector<thrust::complex<float>> &fft_w_coefficients,
    thrust::device_vector<float> &fft_output,
    thrust::device_vector<int> &point_box_idx_device,
    thrust::device_vector<float> &x_in_box_device,
    thrust::device_vector<float> &y_in_box_device,
    thrust::device_vector<float> &points_device,
    thrust::device_vector<float> &box_lower_bounds_device,
    thrust::device_vector<float> &y_tilde_spacings_device,
    thrust::device_vector<float> &denominator_device,
    thrust::device_vector<float> &y_tilde_values,
    thrust::device_vector<float> &all_interpolated_values_device,
    thrust::device_vector<float> &output_values,
    thrust::device_vector<int> &all_interpolated_indices,
    thrust::device_vector<int> &output_indices,
    thrust::device_vector<float> &w_coefficients_device,
    thrust::device_vector<float> &chargesQij_device,
    thrust::device_vector<float> &x_interpolated_values_device,
    thrust::device_vector<float> &y_interpolated_values_device,
    thrust::device_vector<float> &potentialsQij_device)
{
    // std::cout << "start" << std::endl;
    const int num_threads = 128;
    int num_blocks = (N + num_threads - 1) / num_threads;
    // Compute box indices and the relative position of each point in its box in the interval [0, 1]
    compute_point_box_idx<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(point_box_idx_device.data()),
        thrust::raw_pointer_cast(x_in_box_device.data()),
        thrust::raw_pointer_cast(y_in_box_device.data()),
        thrust::raw_pointer_cast(points_device.data()),
        thrust::raw_pointer_cast(points_device.data() + N),  // y coordinates start at offset N
        thrust::raw_pointer_cast(box_lower_bounds_device.data()),
        coord_min,
        box_width,
        n_boxes,
        n_total_boxes,
        N);
    GpuErrorCheck(cudaDeviceSynchronize());
    /*
     * Step 1: Interpolate kernel using Lagrange polynomials and compute the w coefficients
     */
    // TODO: We can stream-parallelize these two interpolation functions
    // Compute the interpolated values at each real point with each Lagrange polynomial in the `x` direction
    num_blocks = (N * n_interpolation_points + num_threads - 1) / num_threads;
    interpolate_device<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(x_interpolated_values_device.data()),
        thrust::raw_pointer_cast(x_in_box_device.data()),
        thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
        thrust::raw_pointer_cast(denominator_device.data()),
        n_interpolation_points,
        N);
    GpuErrorCheck(cudaDeviceSynchronize()); // TODO: Remove the synchronization here
    // Compute the interpolated values at each real point with each Lagrange polynomial in the `y` direction
    interpolate_device<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(y_interpolated_values_device.data()),
        thrust::raw_pointer_cast(y_in_box_device.data()),
        thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
        thrust::raw_pointer_cast(denominator_device.data()),
        n_interpolation_points,
        N);
    GpuErrorCheck(cudaDeviceSynchronize());
    //TODO: Synchronization required here
    // TODO: This section has an atomic-add, can we remove it?
    num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
    compute_interpolated_indices<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(w_coefficients_device.data()),
        thrust::raw_pointer_cast(point_box_idx_device.data()),
        thrust::raw_pointer_cast(chargesQij_device.data()),
        thrust::raw_pointer_cast(x_interpolated_values_device.data()),
        thrust::raw_pointer_cast(y_interpolated_values_device.data()),
        N,
        n_interpolation_points,
        n_boxes,
        n_terms);
    GpuErrorCheck(cudaDeviceSynchronize());
    /*
     * Step 2: Compute the values v_{m, n} at the equispaced nodes, multiply the kernel matrix with the coefficients w
     */
    num_blocks = ((n_terms * n_fft_coeffs_half * n_fft_coeffs_half) + num_threads - 1) / num_threads;
    // Embed the w coefficients in the zero-padded FFT input plane per term.
    copy_to_fft_input<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(fft_input.data()),
        thrust::raw_pointer_cast(w_coefficients_device.data()),
        n_fft_coeffs,
        n_fft_coeffs_half,
        n_terms);
    GpuErrorCheck(cudaDeviceSynchronize());
    // Compute fft values at interpolated nodes
    cufftExecR2C(plan_dft,
                 reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(fft_input.data())),
                 reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())));
    GpuErrorCheck(cudaDeviceSynchronize());
    // Take the broadcasted Hadamard product of a complex matrix and a complex vector
    // TODO: Check timing on this kernel
    tsnecuda::util::BroadcastMatrixVector(
        fft_w_coefficients, fft_kernel_tilde_device, n_fft_coeffs * (n_fft_coeffs / 2 + 1), n_terms,
        thrust::multiplies<thrust::complex<float>>(), 0, thrust::complex<float>(1.0));
    // Invert the computed values at the interpolated nodes
    cufftExecC2R(plan_idft,
                 reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())),
                 reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(fft_output.data())));
    GpuErrorCheck(cudaDeviceSynchronize());
    // Extract (and normalize) the lower-right quadrant of each term's plane.
    copy_from_fft_output<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(y_tilde_values.data()),
        thrust::raw_pointer_cast(fft_output.data()),
        n_fft_coeffs,
        n_fft_coeffs_half,
        n_terms);
    GpuErrorCheck(cudaDeviceSynchronize());
    /*
     * Step 3: Compute the potentials \tilde{\phi}
     */
    // TODO: Depending on the profiling here, we should check to see if we can split this code
    num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
    compute_potential_indices<<<num_blocks, num_threads>>>(
        thrust::raw_pointer_cast(potentialsQij_device.data()),
        thrust::raw_pointer_cast(point_box_idx_device.data()),
        thrust::raw_pointer_cast(y_tilde_values.data()),
        thrust::raw_pointer_cast(x_interpolated_values_device.data()),
        thrust::raw_pointer_cast(y_interpolated_values_device.data()),
        N,
        n_interpolation_points,
        n_boxes,
        n_terms);
    GpuErrorCheck(cudaDeviceSynchronize());
}
|
580808d992c1cd8eb8035030ac1d4aca7c1e8a97.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "palisade.h"
#include "palisadecore.h"
#include "palisade/trapdoor/abe/kp_abe_rns.h"
using namespace std;
using namespace lbcrypto;
//#include <assert.h>
//#define cdpErrchk(ans) { cdpAssert((ans), __FILE__, __LINE__); }
//__device__ void cdpAssert(hipError_t code, const char *file, int line, bool abort=true)
//{
// if (code != hipSuccess)
// {
// printf("GPU kernel assert: %s %s %d\n", hipGetErrorString(code), file, line);
// if (abort) assert(0);
// }
//}
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed HIP API call with file/line context and, unless abort is
// false, terminates the process with the error code as exit status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Copies the first m * q_amount * n elements of device buffer B into device
// buffer A. The per-(row, limb) chunks are contiguous (the original advanced
// a running offset by n per copy starting from 0), so a single
// device-to-device memcpy replaces the original m * q_amount separate
// n-element copies — same bytes moved, far fewer API calls.
void copyRow(unsigned long long * &A, unsigned long long * &B, int m, unsigned q_amount, unsigned n) {
    const size_t total = (size_t)m * q_amount * n;
    CUDA_CALL(hipMemcpy(A, B, sizeof(unsigned long long) * total, hipMemcpyDeviceToDevice));
}
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell);
usint EvalNANDTree(usint *x, usint ell);
// Entry point: seeds the PALISADE PRNG and runs the KP-ABE benchmark with a
// fixed configuration (4 iterations, 2 attributes, 4 CRT moduli, ring
// dimension 2^12, gadget base 2^20).
int main() {
    // PalisadeParallelControls.Enable();
    PseudoRandomNumberGenerator::InitPRNG();
    const usint iterations = 4;
    const usint attributes = 2;
    const usint crt_moduli = 4;
    const usint ring_dimension = 1 << 12;
    const usint gadget_base = 1 << 20;
    KPABE_BenchmarkCircuitTestDCRT(iterations, gadget_base, ring_dimension,
                                   crt_moduli, attributes);
    return 0;
}
// Flattens a host Matrix<DCRTPoly> into a packed device buffer: entries in
// row-major matrix order, each entry laid out as q_size consecutive RNS limbs
// of n coefficients. dest must be large enough for
// rows * cols * q_size * n unsigned long longs.
// Bug fix: the inner loop previously iterated src.GetRows() instead of
// src.GetCols(), so any non-square matrix (e.g. the (ell+1) x m public
// element matrix used by the benchmark) was copied incorrectly.
void copyMatrixH2D(unsigned long long *&dest, Matrix<DCRTPoly> src) {
    vector<vector<DCRTPoly>> matrix = src.GetData();
    unsigned long long index = 0;
    for (size_t i = 0; i < src.GetRows(); i++){
        for (size_t j = 0; j < src.GetCols(); j++){
            int q_size = matrix[i][j].GetParams()->GetParams().size();
            vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
            int n = rns_poly[0].GetRingDimension();
            // One host-to-device transfer per RNS limb of this entry.
            for (int k = 0; k < q_size; k++, index += n)
                CUDA_CALL(hipMemcpy(dest + index, &rns_poly[k].GetValues()[0],
                                    sizeof(unsigned long long) * n, hipMemcpyHostToDevice));
        }
    }
}
// EXPERIMENTAL GPU port of KPABErns::EvalCT (work in progress): so far it
// only copies the A-part of the ciphertext into the output, allocates scratch
// for the internal-wire public elements / ciphertexts, and computes the
// first-level NAND wire values on the host. The per-gate bit-decomposition
// loop is still commented out.
// NOTE(review): wPublicElementB, wCT and negB are never freed (device-memory
// leak on every call); negPubElemB_device and evalAttributes_dev are
// currently unused — confirm intent before relying on this function.
void EvalCT_GPU(KPABErns &kpabe, const shared_ptr<ILDCRTParams<BigInteger>> &params, unsigned long long* &negPubElemB_device,
                usint x[], usint *x_device, unsigned long long* &origCT_device, usint *&evalAttributes_dev,
                unsigned long long* &evalCT_device) {
    vector<LatticeSubgaussianUtility<NativeInteger>> util = kpabe.Get_util();
    usint ell = kpabe.Get_ell();                // number of attributes
    usint m = kpabe.Get_m();                    // ciphertext/matrix width
    usint n = params->GetRingDimension();
    usint q_size = params->GetParams().size();  // number of CRT limbs
    // Part pertaining to A (does not change)
    copyRow(evalCT_device, origCT_device, m, q_size, n);
    usint gateCnt = ell - 1;
    // Matrix<DCRTPoly> psi(zero_alloc, m_m, m_m);
    // w stands for Wire
    unsigned long long* wPublicElementB;
    // createMatrix(wPublicElementB, gateCnt, m, q_size, n); // Bis associated with internal wires of the circuit
    CUDA_CALL(hipMalloc(&wPublicElementB, sizeof(unsigned long long) * gateCnt * m * q_size * n));
    unsigned long long* wCT;
    // createMatrix(wCT, gateCnt, m, q_size, n); // Ciphertexts associated with internal wires of the circuit
    CUDA_CALL(hipMalloc(&wCT, sizeof(unsigned long long) * gateCnt * m * q_size * n));
    // Attribute values associated with internal wires of the circuit
    //TODO check this one
    std::vector<usint> wX(gateCnt);
    // Temporary variables for bit decomposition operation
    unsigned long long* negB;
    // createMatrix(negB, gateCnt, m, q_size, n); // Format::EVALUATION (NTT domain)
    CUDA_CALL(hipMalloc(&negB, sizeof(unsigned long long) * gateCnt * m * q_size * n));
    // Input level of the circuit
    usint t = ell >> 1;  // the number of the gates in the first level (the
                         // number of input gates)
    // looping to evaluate and calculate w, wB, wC
    // and R for all first level input gates
    for (usint i = 0; i < t; i++)
        // First-level NAND wire value; relies on x[0] == 1 as set up by the
        // caller (so x[0] - a*b == 1 - a*b) — TODO confirm.
        wX[i] = x[0] - x[2 * i + 1] * x[2 * i + 2]; // calculating binary wire value
    //
    //#pragma omp parallel for schedule(dynamic)
    //    for (usint j = 0; j < m; j++) { // Negating Bis for bit decomposition
    //      negB(0, j) = pubElemB(2 * i + 1, j).Negate();
    //      negB(0, j).SwitchFormat();
    //    }
}
// Benchmarks the full KP-ABE pipeline (setup, encryption, ciphertext and
// public-key evaluation, key generation, decryption) over a DCRT ring, and
// additionally drives the experimental GPU ciphertext-evaluation path.
//   iter: number of benchmark iterations
//   base: gadget-decomposition base
//   n:    ring dimension
//   size: number of CRT moduli
//   ell:  number of attributes
// Returns 0 both on success and on decryption failure (failure is reported
// on stdout).
// Fixes vs. the original:
//  - evalCf_device is now actually allocated (the original malloc'd
//    ctCin_device a second time, leaking it and passing an uninitialized
//    pointer to EvalCT_GPU);
//  - the y_device allocation is checked via CUDA_CALL;
//  - per-iteration device buffers are freed (previously leaked every loop).
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell) {
  size_t kRes = 50;  // CRT modulus size
  std::cout << "Number of attributes: " << ell << std::endl;
  std::cout << "n: " << n << std::endl;
  std::vector<NativeInteger> moduli;
  std::vector<NativeInteger> roots_Of_Unity;
  // makes sure the first integer is less than 2^60-1 to take advantage of
  // NTL optimizations
  NativeInteger firstInteger = FirstPrime<NativeInteger>(kRes, 2 * n);
  NativeInteger q = PreviousPrime<NativeInteger>(firstInteger, 2 * n);
  moduli.push_back(q);
  roots_Of_Unity.push_back(RootOfUnity<NativeInteger>(2 * n, moduli[0]));
  std::cout << "q["<< 0 <<"]_k: " << q.GetMSB() << std::endl;
  // Generate the remaining CRT moduli as successive previous primes.
  NativeInteger prevQ = q;
  for (size_t i = 1; i < size; i++) {
    prevQ = lbcrypto::PreviousPrime<NativeInteger>(prevQ, 2 * n);
    NativeInteger nextRootOfUnity(RootOfUnity<NativeInteger>(2 * n, prevQ));
    moduli.push_back(prevQ);
    std::cout << "q["<< i <<"]_k: " << moduli[i].GetMSB() << std::endl;
    roots_Of_Unity.push_back(nextRootOfUnity);
  }
  auto ilDCRTParams =
      std::make_shared<ILDCRTParams<BigInteger>>(2 * n, moduli, roots_Of_Unity);
  ChineseRemainderTransformFTT<NativeVector>::PreCompute(roots_Of_Unity, 2 * n,
                                                         moduli);
  std::cout << "k: " << ilDCRTParams->GetModulus().GetMSB() << std::endl;
  size_t digitCount = (long)ceil(
      log2(ilDCRTParams->GetParams()[0]->GetModulus().ConvertToDouble()) /
      log2(base));
  size_t k = digitCount * ilDCRTParams->GetParams().size();
  std::cout << "digit count = " << digitCount << std::endl;
  size_t m = k + 2;  // trapdoor / public-matrix width
  auto zero_alloc = DCRTPoly::Allocator(ilDCRTParams, Format::COEFFICIENT);
  DCRTPoly::DggType dgg = DCRTPoly::DggType(SIGMA);
  DCRTPoly::DugType dug = DCRTPoly::DugType();
  DCRTPoly::BugType bug = DCRTPoly::BugType();
  // Trapdoor Generation
  std::pair<Matrix<DCRTPoly>, RLWETrapdoorPair<DCRTPoly>> trapdoorA =
      RLWETrapdoorUtility<DCRTPoly>::TrapdoorGen(
          ilDCRTParams, SIGMA, base);  // A.first is the public element
  DCRTPoly pubElemBeta(dug, ilDCRTParams, Format::EVALUATION);
  Matrix<DCRTPoly> publicElementB(zero_alloc, ell + 1, m);
  Matrix<DCRTPoly> ctCin(zero_alloc, ell + 2, m);
  DCRTPoly c1(dug, ilDCRTParams, Format::EVALUATION);
  KPABErns pkg, sender, receiver;
  pkg.Setup(ilDCRTParams, base, ell, dug, &publicElementB);
  sender.Setup(ilDCRTParams, base, ell);
  receiver.Setup(ilDCRTParams, base, ell);
  // Attribute values all are set to 1 for NAND gate Format::EVALUATION
  std::vector<usint> x(ell + 1);
  x[0] = 1;
  // Rejection-sample random attributes until the NAND tree evaluates to 0.
  usint found = 0;
  while (found == 0) {
    for (usint i = 1; i < ell + 1; i++)
      x[i] = bug.GenerateInteger().ConvertToInt();
    if (EvalNANDTree(&x[1], ell) == 0) found = 1;
  }
  usint y;
  TimeVar t1;
  double avg_keygen(0.0), avg_evalct(0.0), avg_evalpk(0.0), avg_enc(0.0),
      avg_dec(0.0);
  // plaintext
  for (usint i = 0; i < iter; i++) {
    std::cout << "running iter " << i + 1 << std::endl;
    NativePoly ptext(bug, ilDCRTParams->GetParams()[0], Format::COEFFICIENT);
    // circuit outputs
    Matrix<DCRTPoly> evalBf(
        DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
        m);  // evaluated Bs
    Matrix<DCRTPoly> evalCf(
        DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
        m);  // evaluated Cs
    Matrix<DCRTPoly> ctCA(DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION),
                          1, m);  // CA
    // secret key corresponding to the circuit output
    Matrix<DCRTPoly> sk(zero_alloc, 2, m);
    // decrypted text
    NativePoly dtext;
    TIC(t1);
    sender.Encrypt(ilDCRTParams, trapdoorA.first, publicElementB, pubElemBeta,
                   &x[0], ptext, dgg, dug, bug, &ctCin,
                   &c1);  // Cin and c1 are the ciphertext
    avg_enc += TOC(t1);
    ctCA = ctCin.ExtractRow(0);  // CA is A^T * s + e 0,A
    // Allocate and copy the device-side inputs for the GPU evaluation path.
    unsigned long long* publicElemB_device;
    CUDA_CALL(hipMalloc(reinterpret_cast<void **>(&publicElemB_device), sizeof(unsigned long long) * (ell + 1) * m * size * n));
    copyMatrixH2D(publicElemB_device, publicElementB);
    usint* x_device;
    CUDA_CALL(hipMalloc(&x_device, (ell + 1) * sizeof(usint)));
    CUDA_CALL(hipMemcpy(x_device, &x[0], (ell + 1) * sizeof(usint), hipMemcpyHostToDevice));
    unsigned long long* ctCin_device;
    CUDA_CALL(hipMalloc(&ctCin_device, sizeof(unsigned long long) * (ell + 1) * m * size * n));
    unsigned long long* evalCf_device;
    // Bug fix: this call previously re-allocated ctCin_device (copy-paste),
    // leaking the first allocation and leaving evalCf_device uninitialized.
    CUDA_CALL(hipMalloc(&evalCf_device, sizeof(unsigned long long) * (1) * m * size * n));
    copyMatrixH2D(ctCin_device, ctCin.ExtractRows(1, ell + 1));
    usint* y_device;
    CUDA_CALL(hipMalloc(&y_device, sizeof(usint)));  // bug fix: result was unchecked
    EvalCT_GPU(sender, ilDCRTParams, publicElemB_device, &x[0], x_device, ctCin_device, y_device, evalCf_device);
    // Release per-iteration device buffers (previously leaked every iteration).
    CUDA_CALL(hipFree(publicElemB_device));
    CUDA_CALL(hipFree(x_device));
    CUDA_CALL(hipFree(ctCin_device));
    CUDA_CALL(hipFree(evalCf_device));
    CUDA_CALL(hipFree(y_device));
    TIC(t1);
    receiver.EvalCT(ilDCRTParams, publicElementB, &x[0],
                    ctCin.ExtractRows(1, ell + 1), &y, &evalCf);
    avg_evalct += TOC(t1);
    TIC(t1);
    pkg.EvalPK(ilDCRTParams, publicElementB, &evalBf);
    avg_evalpk += TOC(t1);
    TIC(t1);
    pkg.KeyGen(ilDCRTParams, trapdoorA.first, evalBf, pubElemBeta,
               trapdoorA.second, dgg, &sk);
    avg_keygen += TOC(t1);
    // CheckSecretKeyKPDCRT(m, trapdoorA.first, evalBf, sk, pubElemBeta);
    TIC(t1);
    receiver.Decrypt(ilDCRTParams, sk, ctCA, evalCf, c1, &dtext);
    avg_dec += TOC_US(t1);
    // The plaintext is binary; compare modulo 2.
    NativeVector ptext2 = ptext.GetValues();
    ptext2.SetModulus(NativeInteger(2));
    if (ptext2 != dtext.GetValues()) {
      std::cout << "Decryption fails at iteration: " << i << std::endl;
      return 0;
    }
  }
  std::cout << "Encryption is successful after " << iter << " iterations!\n";
  std::cout << "Average key generation time : "
            << "\t" << (avg_keygen) / iter << " ms" << std::endl;
  std::cout << "Average ciphertext Format::EVALUATION time : "
            << "\t" << (avg_evalct) / iter << " ms" << std::endl;
  std::cout << "Average public key Format::EVALUATION time : "
            << "\t" << (avg_evalpk) / iter << " ms" << std::endl;
  std::cout << "Average encryption time : "
            << "\t" << (avg_enc) / iter << " ms" << std::endl;
  std::cout << "Average decryption time : "
            << "\t" << (avg_dec) / (iter * 1000) << " ms" << std::endl;
  return 0;
}
usint EvalNANDTree(usint *x, usint ell) {
usint y;
if (ell == 2) {
y = 1 - x[0] * x[1];
return y;
} else {
ell >>= 1;
y = 1 - (EvalNANDTree(&x[0], ell) * EvalNANDTree(&x[ell], ell));
}
return y;
}
| 580808d992c1cd8eb8035030ac1d4aca7c1e8a97.cu | #include <iostream>
#include "palisade.h"
#include "palisadecore.h"
#include "palisade/trapdoor/abe/kp_abe_rns.h"
using namespace std;
using namespace lbcrypto;
//#include <assert.h>
//#define cdpErrchk(ans) { cdpAssert((ans), __FILE__, __LINE__); }
//__device__ void cdpAssert(cudaError_t code, const char *file, int line, bool abort=true)
//{
// if (code != cudaSuccess)
// {
// printf("GPU kernel assert: %s %s %d\n", cudaGetErrorString(code), file, line);
// if (abort) assert(0);
// }
//}
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void copyRow(unsigned long long * &A, unsigned long long * &B, int m, unsigned q_amount, unsigned n) {
unsigned int index = 0;
for (int i = 0; i < m; i++){
for (int j = 0; j < q_amount; j++, index+=n){
CUDA_CALL(cudaMemcpy(A+index, B+index, sizeof(unsigned long long) * n, cudaMemcpyDeviceToDevice));
}
}
}
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell);
usint EvalNANDTree(usint *x, usint ell);
int main() {
// PalisadeParallelControls.Enable();
PseudoRandomNumberGenerator::InitPRNG();
usint iter = 4;
usint att = 2;
usint q_size = 4;
usint n = 1 << 12;
usint base = 1 << 20;
KPABE_BenchmarkCircuitTestDCRT(iter, base,n,q_size,att);
return 0;
}
void copyMatrixH2D(unsigned long long *&dest, Matrix<DCRTPoly> src) {
vector<vector<DCRTPoly>> matrix = src.GetData();
unsigned long long index = 0;
for (int i = 0; i < src.GetRows(); i++){
for (int j = 0; j < src.GetRows(); j++){
int q_size = matrix[i][j].GetParams()->GetParams().size();
vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
int n = rns_poly[0].GetRingDimension();
for (int k = 0; k < q_size; k++, index+=n)
CUDA_CALL(cudaMemcpy(dest+index, &rns_poly[k].GetValues()[0], sizeof(unsigned long long) * n,cudaMemcpyHostToDevice));
}
}
}
void EvalCT_GPU(KPABErns &kpabe, const shared_ptr<ILDCRTParams<BigInteger>> ¶ms, unsigned long long* &negPubElemB_device,
usint x[], usint *x_device, unsigned long long* &origCT_device, usint *&evalAttributes_dev,
unsigned long long* &evalCT_device) {
vector<LatticeSubgaussianUtility<NativeInteger>> util = kpabe.Get_util();
usint ell = kpabe.Get_ell();
usint m = kpabe.Get_m();
usint n = params->GetRingDimension();
usint q_size = params->GetParams().size();
// Part pertaining to A (does not change)
copyRow(evalCT_device, origCT_device,m,q_size,n);
usint gateCnt = ell - 1;
// Matrix<DCRTPoly> psi(zero_alloc, m_m, m_m);
// w stands for Wire
unsigned long long* wPublicElementB;
// createMatrix(wPublicElementB, gateCnt, m, q_size, n); // Bis associated with internal wires of the circuit
CUDA_CALL(cudaMalloc(&wPublicElementB, sizeof(unsigned long long) * gateCnt * m * q_size * n));
unsigned long long* wCT;
// createMatrix(wCT, gateCnt, m, q_size, n); // Ciphertexts associated with internal wires of the circuit
CUDA_CALL(cudaMalloc(&wCT, sizeof(unsigned long long) * gateCnt * m * q_size * n));
// Attribute values associated with internal wires of the circuit
//TODO check this one
std::vector<usint> wX(gateCnt);
// Temporary variables for bit decomposition operation
unsigned long long* negB;
// createMatrix(negB, gateCnt, m, q_size, n); // Format::EVALUATION (NTT domain)
CUDA_CALL(cudaMalloc(&negB, sizeof(unsigned long long) * gateCnt * m * q_size * n));
// Input level of the circuit
usint t = ell >> 1; // the number of the gates in the first level (the
// number of input gates)
// looping to evaluate and calculate w, wB, wC
// and R for all first level input gates
for (usint i = 0; i < t; i++)
wX[i] = x[0] - x[2 * i + 1] * x[2 * i + 2]; // calculating binary wire value
//
//#pragma omp parallel for schedule(dynamic)
// for (usint j = 0; j < m; j++) { // Negating Bis for bit decomposition
// negB(0, j) = pubElemB(2 * i + 1, j).Negate();
// negB(0, j).SwitchFormat();
// }
}
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell) {
// usint n = 1 << 12; // cyclotomic order
size_t kRes = 50; // CRT modulus size
// usint ell = 4; // No of attributes
// size_t size = 2; // Number of CRT moduli
std::cout << "Number of attributes: " << ell << std::endl;
std::cout << "n: " << n << std::endl;
// double sigma = SIGMA;
std::vector<NativeInteger> moduli;
std::vector<NativeInteger> roots_Of_Unity;
// makes sure the first integer is less than 2^60-1 to take advangate of NTL
// optimizations
NativeInteger firstInteger = FirstPrime<NativeInteger>(kRes, 2 * n);
NativeInteger q = PreviousPrime<NativeInteger>(firstInteger, 2 * n);
moduli.push_back(q);
roots_Of_Unity.push_back(RootOfUnity<NativeInteger>(2 * n, moduli[0]));
std::cout << "q["<< 0 <<"]_k: " << q.GetMSB() << std::endl;
NativeInteger prevQ = q;
for (size_t i = 1; i < size; i++) {
prevQ = lbcrypto::PreviousPrime<NativeInteger>(prevQ, 2 * n);
NativeInteger nextRootOfUnity(RootOfUnity<NativeInteger>(2 * n, prevQ));
moduli.push_back(prevQ);
std::cout << "q["<< i <<"]_k: " << moduli[i].GetMSB() << std::endl;
roots_Of_Unity.push_back(nextRootOfUnity);
}
auto ilDCRTParams =
std::make_shared<ILDCRTParams<BigInteger>>(2 * n, moduli, roots_Of_Unity);
ChineseRemainderTransformFTT<NativeVector>::PreCompute(roots_Of_Unity, 2 * n,
moduli);
std::cout << "k: " << ilDCRTParams->GetModulus().GetMSB() << std::endl;
size_t digitCount = (long)ceil(
log2(ilDCRTParams->GetParams()[0]->GetModulus().ConvertToDouble()) /
log2(base));
size_t k = digitCount * ilDCRTParams->GetParams().size();
std::cout << "digit count = " << digitCount << std::endl;
// std::cout << "k = " << k << std::endl;
size_t m = k + 2;
auto zero_alloc = DCRTPoly::Allocator(ilDCRTParams, Format::COEFFICIENT);
DCRTPoly::DggType dgg = DCRTPoly::DggType(SIGMA);
DCRTPoly::DugType dug = DCRTPoly::DugType();
DCRTPoly::BugType bug = DCRTPoly::BugType();
// Trapdoor Generation
std::pair<Matrix<DCRTPoly>, RLWETrapdoorPair<DCRTPoly>> trapdoorA =
RLWETrapdoorUtility<DCRTPoly>::TrapdoorGen(
ilDCRTParams, SIGMA, base); // A.first is the public element
DCRTPoly pubElemBeta(dug, ilDCRTParams, Format::EVALUATION);
Matrix<DCRTPoly> publicElementB(zero_alloc, ell + 1, m);
Matrix<DCRTPoly> ctCin(zero_alloc, ell + 2, m);
DCRTPoly c1(dug, ilDCRTParams, Format::EVALUATION);
KPABErns pkg, sender, receiver;
pkg.Setup(ilDCRTParams, base, ell, dug, &publicElementB);
sender.Setup(ilDCRTParams, base, ell);
receiver.Setup(ilDCRTParams, base, ell);
// Attribute values all are set to 1 for NAND gate Format::EVALUATION
std::vector<usint> x(ell + 1);
x[0] = 1;
usint found = 0;
while (found == 0) {
for (usint i = 1; i < ell + 1; i++)
// x[i] = rand() & 0x1;
x[i] = bug.GenerateInteger().ConvertToInt();
if (EvalNANDTree(&x[1], ell) == 0) found = 1;
}
usint y;
TimeVar t1;
double avg_keygen(0.0), avg_evalct(0.0), avg_evalpk(0.0), avg_enc(0.0),
avg_dec(0.0);
// plaintext
for (usint i = 0; i < iter; i++) {
std::cout << "running iter " << i + 1 << std::endl;
NativePoly ptext(bug, ilDCRTParams->GetParams()[0], Format::COEFFICIENT);
// circuit outputs
Matrix<DCRTPoly> evalBf(
DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
m); // evaluated Bs
Matrix<DCRTPoly> evalCf(
DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
m); // evaluated Cs
Matrix<DCRTPoly> ctCA(DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION),
1, m); // CA
// secret key corresponding to the circuit output
Matrix<DCRTPoly> sk(zero_alloc, 2, m);
// decrypted text
NativePoly dtext;
// Switches to Format::EVALUATION representation
// ptext.SwitchFormat();
TIC(t1);
sender.Encrypt(ilDCRTParams, trapdoorA.first, publicElementB, pubElemBeta,
&x[0], ptext, dgg, dug, bug, &ctCin,
&c1); // Cin and c1 are the ciphertext
avg_enc += TOC(t1);
ctCA = ctCin.ExtractRow(0); // CA is A^T * s + e 0,A
// Allocate and copy variables used by functions
unsigned long long* publicElemB_device;
// createMatrix(negPubElemB_device,ell + 1, m,size,n);
CUDA_CALL(cudaMalloc(reinterpret_cast<void **>(&publicElemB_device), sizeof(unsigned long long) * (ell + 1) * m * size * n));
copyMatrixH2D(publicElemB_device,publicElementB);
usint* x_device;
CUDA_CALL(cudaMalloc(&x_device,(ell+1) * sizeof(usint)));
CUDA_CALL(cudaMemcpy(x_device,&x[0], (ell+1) * sizeof(usint),cudaMemcpyHostToDevice));
unsigned long long* ctCin_device;
// TODO: Bunu tekrar bak
// createMatrix(ctCin_device,ell+1, m,size,n);
CUDA_CALL(cudaMalloc(&ctCin_device, sizeof(unsigned long long) * (ell + 1) * m * size * n));
unsigned long long* evalCf_device;
// createMatrix(evalCf_device,1, m,size,n);
CUDA_CALL(cudaMalloc(&ctCin_device, sizeof(unsigned long long) * (1) * m * size * n));
copyMatrixH2D(ctCin_device,ctCin.ExtractRows(1, ell + 1));
usint* y_device;
cudaMalloc(&y_device, sizeof(usint));
EvalCT_GPU(sender, ilDCRTParams, publicElemB_device, &x[0], x_device, ctCin_device, y_device, evalCf_device);
TIC(t1);
receiver.EvalCT(ilDCRTParams, publicElementB, &x[0],
ctCin.ExtractRows(1, ell + 1), &y, &evalCf);
avg_evalct += TOC(t1);
TIC(t1);
pkg.EvalPK(ilDCRTParams, publicElementB, &evalBf);
avg_evalpk += TOC(t1);
TIC(t1);
pkg.KeyGen(ilDCRTParams, trapdoorA.first, evalBf, pubElemBeta,
trapdoorA.second, dgg, &sk);
avg_keygen += TOC(t1);
// CheckSecretKeyKPDCRT(m, trapdoorA.first, evalBf, sk, pubElemBeta);
TIC(t1);
receiver.Decrypt(ilDCRTParams, sk, ctCA, evalCf, c1, &dtext);
avg_dec += TOC_US(t1);
NativeVector ptext2 = ptext.GetValues();
ptext2.SetModulus(NativeInteger(2));
if (ptext2 != dtext.GetValues()) {
std::cout << "Decryption fails at iteration: " << i << std::endl;
// std::cerr << ptext << std::endl;
// std::cerr << dtext << std::endl;
return 0;
}
// std::cerr << ptext << std::endl;
// std::cerr << dtext << std::endl;
}
std::cout << "Encryption is successful after " << iter << " iterations!\n";
std::cout << "Average key generation time : "
<< "\t" << (avg_keygen) / iter << " ms" << std::endl;
std::cout << "Average ciphertext Format::EVALUATION time : "
<< "\t" << (avg_evalct) / iter << " ms" << std::endl;
std::cout << "Average public key Format::EVALUATION time : "
<< "\t" << (avg_evalpk) / iter << " ms" << std::endl;
std::cout << "Average encryption time : "
<< "\t" << (avg_enc) / iter << " ms" << std::endl;
std::cout << "Average decryption time : "
<< "\t" << (avg_dec) / (iter * 1000) << " ms" << std::endl;
return 0;
}
usint EvalNANDTree(usint *x, usint ell) {
usint y;
if (ell == 2) {
y = 1 - x[0] * x[1];
return y;
} else {
ell >>= 1;
y = 1 - (EvalNANDTree(&x[0], ell) * EvalNANDTree(&x[ell], ell));
}
return y;
}
|
63f4ce4cb51a589e9ed3edff60943b94d513b461.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _VECTOR_REDUCTION_KERNEL_H_
#define _VECTOR_REDUCTION_KERNEL_H_
#define NUM_ELEMENTS 1024
// **===----------------- Modify this function ---------------------===**
//! @param g_idata input data in global memory
// result is expected in index 0 of g_idata
//! @param n input number of elements to scan from input data
// **===------------------------------------------------------------------===**
__global__ void reduction(float *g_data, float *g_odata, int n)
{
__shared__ double sdata[1024];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = g_data[i];
__syncthreads();
/*
for (unsigned int s = 1 ; s < blockDim.x; s*=2){
int index = 2 * s * tid;
if (index < blockDim.x){
sdata [index] += sdata[index + s];
}
__syncthreads();
}
*/
for (unsigned int s = 1; s < blockDim.x; s*=2){
if (tid %(2*s) == 0){
sdata[tid] += sdata [tid + s];
}
__syncthreads();
}
/*
for (unsigned int s = blockDim.x/2; s> 0; s >>=1 ){
if (tid < s){
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
*/
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
#endif // #ifndef _VECTOR_REDUCTION_KERNEL_H_
| 63f4ce4cb51a589e9ed3edff60943b94d513b461.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _VECTOR_REDUCTION_KERNEL_H_
#define _VECTOR_REDUCTION_KERNEL_H_
#define NUM_ELEMENTS 1024
// **===----------------- Modify this function ---------------------===**
//! @param g_idata input data in global memory
// result is expected in index 0 of g_idata
//! @param n input number of elements to scan from input data
// **===------------------------------------------------------------------===**
__global__ void reduction(float *g_data, float *g_odata, int n)
{
__shared__ double sdata[1024];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = g_data[i];
__syncthreads();
/*
for (unsigned int s = 1 ; s < blockDim.x; s*=2){
int index = 2 * s * tid;
if (index < blockDim.x){
sdata [index] += sdata[index + s];
}
__syncthreads();
}
*/
for (unsigned int s = 1; s < blockDim.x; s*=2){
if (tid %(2*s) == 0){
sdata[tid] += sdata [tid + s];
}
__syncthreads();
}
/*
for (unsigned int s = blockDim.x/2; s> 0; s >>=1 ){
if (tid < s){
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
*/
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
#endif // #ifndef _VECTOR_REDUCTION_KERNEL_H_
|
d6484a850f2b57ffd4939dc8f8f4c9f10507487f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* stream3Streams.cu
*
* Formulation of stream2Async.cu that uses streams to overlap data
* transfers and kernel processing.
*
* Build with: nvcc -I ../chLib stream3Streams.cu
*
* Copyright (c) 2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chTimer.h>
#include <stdio.h>
#include <stdlib.h>
#include "saxpyCPU.h"
#include "saxpyGPU.cuh"
hipError_t
MeasureTimes(
float *msTotal,
float *msWallClock,
size_t N,
float alpha,
int nStreams,
int nBlocks,
int nThreads )
{
hipError_t status;
chTimerTimestamp chStart, chStop;
float *dptrOut = 0, *hptrOut = 0;
float *dptrY = 0, *hptrY = 0;
float *dptrX = 0, *hptrX = 0;
hipStream_t *streams = 0;
hipEvent_t evStart = 0;
hipEvent_t evStop = 0;
size_t streamStep = N / nStreams;
if ( N % nStreams ) {
printf( "Stream count must be evenly divisible into N\n" );
status = hipErrorInvalidValue;
goto Error;
}
streams = new hipStream_t[nStreams];
if ( ! streams ) {
status = hipErrorMemoryAllocation;
goto Error;
}
memset( streams, 0, nStreams*sizeof(hipStream_t) );
for ( int i = 0; i < nStreams; i++ ) {
cuda(StreamCreate( &streams[i] ) );
}
cuda(HostAlloc( &hptrOut, N*sizeof(float), 0 ) );
memset( hptrOut, 0, N*sizeof(float) );
cuda(HostAlloc( &hptrY, N*sizeof(float), 0 ) );
cuda(HostAlloc( &hptrX, N*sizeof(float), 0 ) );
cuda(Malloc( &dptrOut, N*sizeof(float) ) );
cuda(Memset( dptrOut, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrY, N*sizeof(float) ) );
cuda(Memset( dptrY, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrX, N*sizeof(float) ) );
cuda(Memset( dptrY, 0, N*sizeof(float) ) );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evStop ) );
chTimerGetTime( &chStart );
cuda(EventRecord( evStart, 0 ) );
for ( int iStream = 0; iStream < nStreams; iStream++ ) {
cuda(MemcpyAsync(
dptrX+iStream*streamStep,
hptrX+iStream*streamStep,
streamStep*sizeof(float),
hipMemcpyHostToDevice,
streams[iStream] ) );
cuda(MemcpyAsync(
dptrY+iStream*streamStep,
hptrY+iStream*streamStep,
streamStep*sizeof(float),
hipMemcpyHostToDevice,
streams[iStream] ) );
}
for ( int iStream = 0; iStream < nStreams; iStream++ ) {
hipLaunchKernelGGL(( saxpyGPU), dim3(nBlocks), dim3(nThreads), 0, streams[iStream],
dptrOut+iStream*streamStep,
dptrX+iStream*streamStep,
dptrY+iStream*streamStep,
streamStep,
alpha );
}
for ( int iStream = 0; iStream < nStreams; iStream++ ) {
cuda(MemcpyAsync(
hptrOut+iStream*streamStep,
dptrOut+iStream*streamStep,
streamStep*sizeof(float),
hipMemcpyDeviceToHost,
streams[iStream] ) );
}
cuda(EventRecord( evStop, 0 ) );
cuda(DeviceSynchronize() );
chTimerGetTime( &chStop );
*msWallClock = 1000.0f*chTimerElapsedTime( &chStart, &chStop );
for ( size_t i = 0; i < N; i++ ) {
if ( fabsf( hptrOut[i] - (alpha*hptrX[i]+hptrY[i]) ) > 1e-5f ) {
status = hipErrorUnknown;
goto Error;
}
}
cuda(EventElapsedTime( msTotal, evStart, evStop ) );
Error:
if ( streams ) {
for ( int i = 0; i < nStreams; i++ ) {
hipStreamDestroy( streams[i] );
}
delete[] streams;
}
hipEventDestroy( evStart );
hipEventDestroy( evStop );
hipFree( dptrOut );
hipFree( dptrX );
hipFree( dptrY );
hipHostFree( hptrOut );
hipHostFree( hptrX );
hipHostFree( hptrY );
return status;
}
double
Bandwidth( float ms, double NumBytes )
{
return NumBytes / (1000.0*ms);
}
int
main( int argc, char *argv[] )
{
hipError_t status;
int N_Mfloats = 128;
size_t N;
int maxStreams = 8;
int nBlocks = 1500;
int nThreads = 256;
float alpha = 2.0f;
chCommandLineGet( &nBlocks, "nBlocks", argc, argv );
chCommandLineGet( &nThreads, "nThreads", argc, argv );
if ( ! chCommandLineGet( &N_Mfloats, "N", argc, argv ) ) {
printf( " Usage: use --N to specify number of Mfloats)\n");
}
printf( "Measuring times with %dM floats\n", N_Mfloats );
if ( ! chCommandLineGet( &maxStreams, "maxStreams", argc, argv ) ) {
printf( "Testing with default max of %d streams "
"(set with --maxStreams <count>)\n", maxStreams );
}
printf( "\n" );
N = 1048576*N_Mfloats;
cuda(SetDeviceFlags( hipDeviceMapHost ) );
printf( "Streams\tTime (ms)\tMB/s\n" );
for ( int numStreams = 1; numStreams <= maxStreams; numStreams++ ) {
float msTotal, msWallClock;
size_t thisN = (N / numStreams)*numStreams;
CUDART_CHECK( MeasureTimes( &msTotal, &msWallClock, thisN, alpha, numStreams, nBlocks, nThreads ) );
printf( "%d\t%.2f ms\t%.2f\n", numStreams, msTotal, Bandwidth( msWallClock, 3*thisN*sizeof(float) ) );
}
Error:
if ( status == hipErrorMemoryAllocation ) {
printf( "Memory allocation failed\n" );
}
else if ( hipSuccess != status ) {
printf( "Failed\n" );
}
return hipSuccess != status;
}
| d6484a850f2b57ffd4939dc8f8f4c9f10507487f.cu | /*
*
* stream3Streams.cu
*
* Formulation of stream2Async.cu that uses streams to overlap data
* transfers and kernel processing.
*
* Build with: nvcc -I ../chLib stream3Streams.cu
*
* Copyright (c) 2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chTimer.h>
#include <stdio.h>
#include <stdlib.h>
#include "saxpyCPU.h"
#include "saxpyGPU.cuh"
cudaError_t
MeasureTimes(
float *msTotal,
float *msWallClock,
size_t N,
float alpha,
int nStreams,
int nBlocks,
int nThreads )
{
cudaError_t status;
chTimerTimestamp chStart, chStop;
float *dptrOut = 0, *hptrOut = 0;
float *dptrY = 0, *hptrY = 0;
float *dptrX = 0, *hptrX = 0;
cudaStream_t *streams = 0;
cudaEvent_t evStart = 0;
cudaEvent_t evStop = 0;
size_t streamStep = N / nStreams;
if ( N % nStreams ) {
printf( "Stream count must be evenly divisible into N\n" );
status = cudaErrorInvalidValue;
goto Error;
}
streams = new cudaStream_t[nStreams];
if ( ! streams ) {
status = cudaErrorMemoryAllocation;
goto Error;
}
memset( streams, 0, nStreams*sizeof(cudaStream_t) );
for ( int i = 0; i < nStreams; i++ ) {
cuda(StreamCreate( &streams[i] ) );
}
cuda(HostAlloc( &hptrOut, N*sizeof(float), 0 ) );
memset( hptrOut, 0, N*sizeof(float) );
cuda(HostAlloc( &hptrY, N*sizeof(float), 0 ) );
cuda(HostAlloc( &hptrX, N*sizeof(float), 0 ) );
cuda(Malloc( &dptrOut, N*sizeof(float) ) );
cuda(Memset( dptrOut, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrY, N*sizeof(float) ) );
cuda(Memset( dptrY, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrX, N*sizeof(float) ) );
cuda(Memset( dptrY, 0, N*sizeof(float) ) );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evStop ) );
chTimerGetTime( &chStart );
cuda(EventRecord( evStart, 0 ) );
for ( int iStream = 0; iStream < nStreams; iStream++ ) {
cuda(MemcpyAsync(
dptrX+iStream*streamStep,
hptrX+iStream*streamStep,
streamStep*sizeof(float),
cudaMemcpyHostToDevice,
streams[iStream] ) );
cuda(MemcpyAsync(
dptrY+iStream*streamStep,
hptrY+iStream*streamStep,
streamStep*sizeof(float),
cudaMemcpyHostToDevice,
streams[iStream] ) );
}
for ( int iStream = 0; iStream < nStreams; iStream++ ) {
saxpyGPU<<<nBlocks, nThreads, 0, streams[iStream]>>>(
dptrOut+iStream*streamStep,
dptrX+iStream*streamStep,
dptrY+iStream*streamStep,
streamStep,
alpha );
}
for ( int iStream = 0; iStream < nStreams; iStream++ ) {
cuda(MemcpyAsync(
hptrOut+iStream*streamStep,
dptrOut+iStream*streamStep,
streamStep*sizeof(float),
cudaMemcpyDeviceToHost,
streams[iStream] ) );
}
cuda(EventRecord( evStop, 0 ) );
cuda(DeviceSynchronize() );
chTimerGetTime( &chStop );
*msWallClock = 1000.0f*chTimerElapsedTime( &chStart, &chStop );
for ( size_t i = 0; i < N; i++ ) {
if ( fabsf( hptrOut[i] - (alpha*hptrX[i]+hptrY[i]) ) > 1e-5f ) {
status = cudaErrorUnknown;
goto Error;
}
}
cuda(EventElapsedTime( msTotal, evStart, evStop ) );
Error:
if ( streams ) {
for ( int i = 0; i < nStreams; i++ ) {
cudaStreamDestroy( streams[i] );
}
delete[] streams;
}
cudaEventDestroy( evStart );
cudaEventDestroy( evStop );
cudaFree( dptrOut );
cudaFree( dptrX );
cudaFree( dptrY );
cudaFreeHost( hptrOut );
cudaFreeHost( hptrX );
cudaFreeHost( hptrY );
return status;
}
double
Bandwidth( float ms, double NumBytes )
{
return NumBytes / (1000.0*ms);
}
int
main( int argc, char *argv[] )
{
cudaError_t status;
int N_Mfloats = 128;
size_t N;
int maxStreams = 8;
int nBlocks = 1500;
int nThreads = 256;
float alpha = 2.0f;
chCommandLineGet( &nBlocks, "nBlocks", argc, argv );
chCommandLineGet( &nThreads, "nThreads", argc, argv );
if ( ! chCommandLineGet( &N_Mfloats, "N", argc, argv ) ) {
printf( " Usage: use --N to specify number of Mfloats)\n");
}
printf( "Measuring times with %dM floats\n", N_Mfloats );
if ( ! chCommandLineGet( &maxStreams, "maxStreams", argc, argv ) ) {
printf( "Testing with default max of %d streams "
"(set with --maxStreams <count>)\n", maxStreams );
}
printf( "\n" );
N = 1048576*N_Mfloats;
cuda(SetDeviceFlags( cudaDeviceMapHost ) );
printf( "Streams\tTime (ms)\tMB/s\n" );
for ( int numStreams = 1; numStreams <= maxStreams; numStreams++ ) {
float msTotal, msWallClock;
size_t thisN = (N / numStreams)*numStreams;
CUDART_CHECK( MeasureTimes( &msTotal, &msWallClock, thisN, alpha, numStreams, nBlocks, nThreads ) );
printf( "%d\t%.2f ms\t%.2f\n", numStreams, msTotal, Bandwidth( msWallClock, 3*thisN*sizeof(float) ) );
}
Error:
if ( status == cudaErrorMemoryAllocation ) {
printf( "Memory allocation failed\n" );
}
else if ( cudaSuccess != status ) {
printf( "Failed\n" );
}
return cudaSuccess != status;
}
|
da3d45852ec0c3cb9aa42036808f366f700a21be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
#endif // USE_ROCM
// GPU forward pass of the dropout layer.
// TRAIN phase: fills rand_vec_ with uniform random ints and zeroes units whose
// value is <= uint_thres_, scaling the survivors by scale_.
// TEST phase: identity copy.
// Dispatches to the CUDA/ROCm path or the GreenTea/OpenCL path depending on
// the device backend of this layer's device context.
template<typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (this->phase_ == TRAIN) {
// rand_vec_ doubles as the dropout mask buffer (one uint per element).
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
// TEST phase: dropout is a no-op; forward the data unchanged.
caffe_copy(count, bottom_data, top_data);
}
#endif  // USE_ROCM
} else {
#ifdef USE_GREENTEA
// OpenCL path: same logic as above, driven through ViennaCL.
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data());
greentea_gpu_rng_uniform(this->device_context_->id(), count, mask, 0);
// set thresholds
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_forward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
}
#endif  // USE_GREENTEA
}
}
#ifdef USE_ROCM
// Backward dropout kernel: routes the gradient only through units that were
// kept in the forward pass (mask[i] > threshold), scaled by the same factor.
template<typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
#endif // USE_ROCM
// GPU backward pass of the dropout layer.
// TRAIN phase: applies the mask saved in rand_vec_ by Forward_gpu to the top
// gradient. TEST phase: identity copy of the gradient.
// Mirrors Forward_gpu's CUDA/ROCm vs. GreenTea/OpenCL backend dispatch.
template<typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (this->phase_ == TRAIN) {
// Reuse the forward-pass mask; read-only here.
const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_
.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
#endif  // USE_ROCM
} else {
#ifdef USE_GREENTEA
// OpenCL path via ViennaCL.
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.gpu_data());
const int count = bottom[0]->count();
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_backward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(top[0]->count(), (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0, &ctx);
}
#endif  // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
| da3d45852ec0c3cb9aa42036808f366f700a21be.cu | #include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef USE_CUDA
// Forward dropout kernel: out[i] = in[i] * scale when mask[i] > threshold,
// 0 otherwise. The boolean product avoids a divergent branch.
template<typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
#endif // USE_CUDA
// GPU forward pass of the dropout layer (CUDA original of the hipified copy
// above). TRAIN: random mask + threshold + rescale; TEST: identity copy.
// Dispatches between the CUDA path and the GreenTea/OpenCL path.
template<typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (this->phase_ == TRAIN) {
// rand_vec_ doubles as the dropout mask buffer (one uint per element).
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data())
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
#endif  // USE_CUDA
} else {
#ifdef USE_GREENTEA
// OpenCL path via ViennaCL.
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data());
greentea_gpu_rng_uniform(this->device_context_->id(), count, mask, 0);
// set thresholds
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_forward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
}
#endif  // USE_GREENTEA
}
}
#ifdef USE_CUDA
// Backward dropout kernel: passes the gradient only through units kept in the
// forward pass (mask[i] > threshold), scaled by the same factor.
template<typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
#endif // USE_CUDA
// GPU backward pass of the dropout layer (CUDA original of the hipified copy
// above). TRAIN: applies the saved forward-pass mask to the top gradient;
// TEST: identity copy. Same CUDA vs. GreenTea/OpenCL dispatch as Forward_gpu.
template<typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (this->phase_ == TRAIN) {
// Reuse the forward-pass mask; read-only here.
const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_
.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
#endif  // USE_CUDA
} else {
#ifdef USE_GREENTEA
// OpenCL path via ViennaCL.
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.gpu_data());
const int count = bottom[0]->count();
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_backward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(top[0]->count(), (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0, &ctx);
}
#endif  // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
|
3e4ac7870a2d9899c4370ac292a0390cd8342597.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef IMP_CU_BILATERAL_IMPL_CU
#define IMP_CU_BILATERAL_IMPL_CU
#include <imp/cu_imgproc/cu_image_filter.cuh>
#include <cstdint>
#include <hip/hip_runtime.h>
#include <imp/core/types.hpp>
#include <imp/core/roi.hpp>
#include <imp/cu_core/cu_image_gpu.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <imp/cu_core/cu_texture.cuh>
namespace imp {
namespace cu {
// ----------------------------------------------------------------------------
// kernel: bilateral filter kernel C1
// Bilateral filter kernel, single-channel image with single-channel prior.
// One thread per pixel of the ROI (offset by xoff/yoff). Weights combine a
// spatial Gaussian (sigma_spatial) and a range Gaussian on the prior image
// (sigma_range); out-of-image neighbours are skipped.
// FIX: the original read prior[c] BEFORE the x<width && y<height guard, an
// out-of-bounds global read for tail threads of the ceil-divided grid.
__global__ void cuFilterBilateralKernel_32f_C1(const float* src, float* dst,
                                               const float* prior,
                                               const float sigma_spatial, const float sigma_range,
                                               const int radius, const size_t stride,
                                               const int xoff, const int yoff,
                                               const int width, const int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x + xoff;
  int y = blockIdx.y*blockDim.y + threadIdx.y + yoff;

  if(x<width && y<height)
  {
    int c = y*stride+x;
    float p = prior[c];  // center value in the guidance image

    float sum_g = 0.0f;
    float sum_val = 0.0f;

    for (int l=-radius; l<=radius; ++l)
    {
      for (int k=-radius; k<=radius; ++k)
      {
        int xx=x+k, yy=y+l;
        if(xx>=0 && yy>=0 && xx<width && yy<height)
        {
          int cc = yy*stride+xx;
          // spatial * range weight (both Gaussian, fused in one expf)
          float g = expf(-((iu::sqr(x-xx)+iu::sqr(y-yy))/(2.0f*iu::sqr(sigma_spatial)))
                         -(iu::sqr(p-prior[cc])/(2.0f*iu::sqr(sigma_range))));
          sum_g += g;
          sum_val += g*src[cc];
        }
      }
    }
    // normalize; IUMAX guards against a zero weight sum
    dst[c] = sum_val / IUMAX(1e-6f, sum_g);
  }
}
// ----------------------------------------------------------------------------
// kernel: bilateral filter kernel C1 with C4 prior
// Bilateral filter kernel, single-channel image guided by a 4-channel prior.
// stride1 is the element stride of src/dst, stride4 of the float4 prior.
// FIX: the original read prior[y*stride4+x] BEFORE the bounds guard (OOB read
// for tail threads of a ceil-divided grid).
// NOTE(review): the accumulation uses src[y*stride1+x] (the center pixel)
// instead of src[yy*stride1+xx]; preserved as-is here, but it looks like a
// bug in the original — confirm against the C1/C4 variants.
__global__ void cuFilterBilateralKernel_32f_C1C4(const float* src, float* dst,
                                                 const float4* prior,
                                                 const float sigma_spatial, const float sigma_range,
                                                 const int radius,
                                                 const size_t stride1, const size_t stride4,
                                                 const int xoff, const int yoff,
                                                 const int width, const int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x + xoff;
  int y = blockIdx.y*blockDim.y + threadIdx.y + yoff;

  if(x<width && y<height)
  {
    float4 p = prior[y*stride4+x];  // center value in the guidance image

    float sum_g = 0.0f;
    float sum_val = 0.0f;

    for (int l=-radius; l<=radius; ++l)
    {
      for (int k=-radius; k<=radius; ++k)
      {
        int xx=x+k, yy=y+l;
        if(xx>=0 && yy>=0 && xx<width && yy<height)
        {
          float4 diff = p-prior[yy*stride4+xx];
          // spatial * range weight; range distance is the squared L2 norm
          // of the 4-channel difference
          float g = expf(-((iu::sqr(x-xx)+iu::sqr(y-yy))/(2*iu::sqr(sigma_spatial)))
                         -(dot(diff,diff)/(2*iu::sqr(sigma_range))));
          sum_g += g;
          sum_val += g*src[y*stride1+x];
        }
      }
    }
    dst[y*stride1+x] = sum_val / IUMAX(1e-6f, sum_g);
  }
}
// ----------------------------------------------------------------------------
// kernel: bilateral filter kernel C4
// Bilateral filter kernel, 4-channel image with 4-channel prior.
// FIX: the original read prior[c] BEFORE the x<width && y<height guard, an
// out-of-bounds global read for tail threads of the ceil-divided grid.
__global__ void cuFilterBilateralKernel_32f_C4(const float4* src, float4* dst,
                                               const float4* prior,
                                               float sigma_spatial, const float sigma_range,
                                               const int radius, const size_t stride,
                                               const int xoff, const int yoff,
                                               const int width, const int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x + xoff;
  int y = blockIdx.y*blockDim.y + threadIdx.y + yoff;

  if(x<width && y<height)
  {
    int c = y*stride+x;
    float4 p = prior[c];  // center value in the guidance image

    float sum_g = 0.0f;
    float4 sum_val = make_float4(0.0f);

    for (int l=-radius; l<=radius; ++l)
    {
      for (int k=-radius; k<=radius; ++k)
      {
        int xx=x+k, yy=y+l;
        if(xx>=0 && yy>=0 && xx<width && yy<height)
        {
          int cc = yy*stride+xx;
          float4 diff = p-prior[cc];
          // spatial * range weight; range distance is the squared L2 norm
          float g = expf(-((iu::sqr(x-xx)+iu::sqr(y-yy))/(2*iu::sqr(sigma_spatial)))
                         -(dot(diff,diff)/(2*iu::sqr(sigma_range))));
          sum_g += g;
          sum_val += g*src[cc];
        }
      }
    }
    dst[c] = sum_val / IUMAX(1e-6f, sum_g);
  }
}
// ----------------------------------------------------------------------------
// wrapper: bilateral filter, C1
// Host wrapper: iterated bilateral filter, C1 image with C1 prior.
// Launches one 16x16-block grid covering the ROI per iteration.
// NOTE(review): the minMax/printf calls below look like leftover debug
// output — consider removing or guarding behind a verbosity flag.
void cuFilterBilateral(const iu::ImageGpu_32f_C1* src, iu::ImageGpu_32f_C1* dst, const IuRect& roi,
const iu::ImageGpu_32f_C1* prior, const int iters,
const float sigma_spatial, const float sigma_range,
const int radius)
{
float min,max;
iu::minMax(src, src->roi(), min, max);
printf("src min/max=%f/%f\n", min, max);
iu::minMax(prior, src->roi(), min, max);
printf("prior min/max=%f/%f\n", min, max);
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(roi.width, dimBlock.x), iu::divUp(roi.height, dimBlock.y));
// filter iterations
// NOTE(review): each iteration reads src and writes dst with the same
// pointers, so iters > 1 repeats the identical computation rather than
// chaining results — presumably intended to ping-pong; confirm.
for (int iter=0; iter<iters; ++iter)
{
hipLaunchKernelGGL(( cuFilterBilateralKernel_32f_C1)
, dim3(dimGrid), dim3(dimBlock) , 0, 0, src->data(), dst->data(), prior->data(),
sigma_spatial, sigma_range, radius,
src->stride(), roi.x, roi.y, roi.width, roi.height);
}
IU_CUDA_CHECK();
}
// wrapper: bilateral filter, C1 and C4 prior
// Host wrapper: iterated bilateral filter, C1 image guided by a C4 prior.
// Passes both element strides (C1 data vs. float4 prior) to the kernel.
void cuFilterBilateral(const iu::ImageGpu_32f_C1* src, iu::ImageGpu_32f_C1* dst, const IuRect& roi,
const iu::ImageGpu_32f_C4* prior, const int iters,
const float sigma_spatial, const float sigma_range,
const int radius)
{
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(roi.width, dimBlock.x), iu::divUp(roi.height, dimBlock.y));
// filter iterations
for (int iter=0; iter<iters; ++iter)
{
hipLaunchKernelGGL(( cuFilterBilateralKernel_32f_C1C4)
, dim3(dimGrid), dim3(dimBlock) , 0, 0, src->data(), dst->data(), prior->data(),
sigma_spatial, sigma_range, radius,
src->stride(), prior->stride(),
roi.x, roi.y, roi.width, roi.height);
}
IU_CUDA_CHECK();
}
// wrapper: bilateral filter, C4
// Host wrapper: iterated bilateral filter, C4 image with C4 prior.
void cuFilterBilateral(const iu::ImageGpu_32f_C4* src, iu::ImageGpu_32f_C4* dst, const IuRect& roi,
const iu::ImageGpu_32f_C4* prior, const int iters,
const float sigma_spatial, const float sigma_range,
const int radius)
{
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(roi.width, dimBlock.x), iu::divUp(roi.height, dimBlock.y));
// filter iterations
for (int iter=0; iter<iters; ++iter)
{
hipLaunchKernelGGL(( cuFilterBilateralKernel_32f_C4)
, dim3(dimGrid), dim3(dimBlock) , 0, 0, src->data(), dst->data(), prior->data(),
sigma_spatial, sigma_range, radius,
src->stride(), roi.x, roi.y, roi.width, roi.height);
}
IU_CUDA_CHECK();
}
} // namespace cu
} // namespace imp
#endif  // IMP_CU_BILATERAL_IMPL_CU
| 3e4ac7870a2d9899c4370ac292a0390cd8342597.cu | #ifndef IMP_CU_BILATERAL_IMPL_CU
#define IMP_CU_BILATERAL_IMPL_CU
#include <imp/cu_imgproc/cu_image_filter.cuh>
#include <cstdint>
#include <cuda_runtime.h>
#include <imp/core/types.hpp>
#include <imp/core/roi.hpp>
#include <imp/cu_core/cu_image_gpu.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <imp/cu_core/cu_texture.cuh>
namespace imp {
namespace cu {
// ----------------------------------------------------------------------------
// kernel: bilateral filter kernel C1
// Bilateral filter kernel, single-channel image with single-channel prior.
// Spatial Gaussian (sigma_spatial) times range Gaussian on the prior image
// (sigma_range); out-of-image neighbours are skipped.
// FIX: the original read prior[c] BEFORE the x<width && y<height guard, an
// out-of-bounds global read for tail threads of the ceil-divided grid.
__global__ void cuFilterBilateralKernel_32f_C1(const float* src, float* dst,
                                               const float* prior,
                                               const float sigma_spatial, const float sigma_range,
                                               const int radius, const size_t stride,
                                               const int xoff, const int yoff,
                                               const int width, const int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x + xoff;
  int y = blockIdx.y*blockDim.y + threadIdx.y + yoff;

  if(x<width && y<height)
  {
    int c = y*stride+x;
    float p = prior[c];  // center value in the guidance image

    float sum_g = 0.0f;
    float sum_val = 0.0f;

    for (int l=-radius; l<=radius; ++l)
    {
      for (int k=-radius; k<=radius; ++k)
      {
        int xx=x+k, yy=y+l;
        if(xx>=0 && yy>=0 && xx<width && yy<height)
        {
          int cc = yy*stride+xx;
          float g = expf(-((iu::sqr(x-xx)+iu::sqr(y-yy))/(2.0f*iu::sqr(sigma_spatial)))
                         -(iu::sqr(p-prior[cc])/(2.0f*iu::sqr(sigma_range))));
          sum_g += g;
          sum_val += g*src[cc];
        }
      }
    }
    dst[c] = sum_val / IUMAX(1e-6f, sum_g);
  }
}
// ----------------------------------------------------------------------------
// kernel: bilateral filter kernel C1 with C4 prior
// Bilateral filter kernel, single-channel image guided by a 4-channel prior.
// FIX: the original read prior[y*stride4+x] BEFORE the bounds guard (OOB read
// for tail threads of a ceil-divided grid).
// NOTE(review): the accumulation uses src[y*stride1+x] (center pixel) instead
// of src[yy*stride1+xx]; preserved as-is, but it looks like a bug — confirm
// against the C1/C4 variants.
__global__ void cuFilterBilateralKernel_32f_C1C4(const float* src, float* dst,
                                                 const float4* prior,
                                                 const float sigma_spatial, const float sigma_range,
                                                 const int radius,
                                                 const size_t stride1, const size_t stride4,
                                                 const int xoff, const int yoff,
                                                 const int width, const int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x + xoff;
  int y = blockIdx.y*blockDim.y + threadIdx.y + yoff;

  if(x<width && y<height)
  {
    float4 p = prior[y*stride4+x];  // center value in the guidance image

    float sum_g = 0.0f;
    float sum_val = 0.0f;

    for (int l=-radius; l<=radius; ++l)
    {
      for (int k=-radius; k<=radius; ++k)
      {
        int xx=x+k, yy=y+l;
        if(xx>=0 && yy>=0 && xx<width && yy<height)
        {
          float4 diff = p-prior[yy*stride4+xx];
          float g = expf(-((iu::sqr(x-xx)+iu::sqr(y-yy))/(2*iu::sqr(sigma_spatial)))
                         -(dot(diff,diff)/(2*iu::sqr(sigma_range))));
          sum_g += g;
          sum_val += g*src[y*stride1+x];
        }
      }
    }
    dst[y*stride1+x] = sum_val / IUMAX(1e-6f, sum_g);
  }
}
// ----------------------------------------------------------------------------
// kernel: bilateral filter kernel C4
// Bilateral filter kernel, 4-channel image with 4-channel prior.
// FIX: the original read prior[c] BEFORE the x<width && y<height guard, an
// out-of-bounds global read for tail threads of the ceil-divided grid.
__global__ void cuFilterBilateralKernel_32f_C4(const float4* src, float4* dst,
                                               const float4* prior,
                                               float sigma_spatial, const float sigma_range,
                                               const int radius, const size_t stride,
                                               const int xoff, const int yoff,
                                               const int width, const int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x + xoff;
  int y = blockIdx.y*blockDim.y + threadIdx.y + yoff;

  if(x<width && y<height)
  {
    int c = y*stride+x;
    float4 p = prior[c];  // center value in the guidance image

    float sum_g = 0.0f;
    float4 sum_val = make_float4(0.0f);

    for (int l=-radius; l<=radius; ++l)
    {
      for (int k=-radius; k<=radius; ++k)
      {
        int xx=x+k, yy=y+l;
        if(xx>=0 && yy>=0 && xx<width && yy<height)
        {
          int cc = yy*stride+xx;
          float4 diff = p-prior[cc];
          float g = expf(-((iu::sqr(x-xx)+iu::sqr(y-yy))/(2*iu::sqr(sigma_spatial)))
                         -(dot(diff,diff)/(2*iu::sqr(sigma_range))));
          sum_g += g;
          sum_val += g*src[cc];
        }
      }
    }
    dst[c] = sum_val / IUMAX(1e-6f, sum_g);
  }
}
// ----------------------------------------------------------------------------
// wrapper: bilateral filter, C1
// Host wrapper: iterated bilateral filter, C1 image with C1 prior.
// NOTE(review): the minMax/printf calls below look like leftover debug
// output — consider removing or guarding behind a verbosity flag.
void cuFilterBilateral(const iu::ImageGpu_32f_C1* src, iu::ImageGpu_32f_C1* dst, const IuRect& roi,
const iu::ImageGpu_32f_C1* prior, const int iters,
const float sigma_spatial, const float sigma_range,
const int radius)
{
float min,max;
iu::minMax(src, src->roi(), min, max);
printf("src min/max=%f/%f\n", min, max);
iu::minMax(prior, src->roi(), min, max);
printf("prior min/max=%f/%f\n", min, max);
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(roi.width, dimBlock.x), iu::divUp(roi.height, dimBlock.y));
// filter iterations
// NOTE(review): each iteration re-reads src and re-writes dst with the same
// pointers, so iters > 1 repeats the same computation; confirm intent.
for (int iter=0; iter<iters; ++iter)
{
cuFilterBilateralKernel_32f_C1
<<< dimGrid, dimBlock >>> (src->data(), dst->data(), prior->data(),
sigma_spatial, sigma_range, radius,
src->stride(), roi.x, roi.y, roi.width, roi.height);
}
IU_CUDA_CHECK();
}
// wrapper: bilateral filter, C1 and C4 prior
// Host wrapper: iterated bilateral filter, C1 image guided by a C4 prior.
// Passes both element strides (C1 data vs. float4 prior) to the kernel.
void cuFilterBilateral(const iu::ImageGpu_32f_C1* src, iu::ImageGpu_32f_C1* dst, const IuRect& roi,
const iu::ImageGpu_32f_C4* prior, const int iters,
const float sigma_spatial, const float sigma_range,
const int radius)
{
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(roi.width, dimBlock.x), iu::divUp(roi.height, dimBlock.y));
// filter iterations
for (int iter=0; iter<iters; ++iter)
{
cuFilterBilateralKernel_32f_C1C4
<<< dimGrid, dimBlock >>> (src->data(), dst->data(), prior->data(),
sigma_spatial, sigma_range, radius,
src->stride(), prior->stride(),
roi.x, roi.y, roi.width, roi.height);
}
IU_CUDA_CHECK();
}
// wrapper: bilateral filter, C4
// Host wrapper: iterated bilateral filter, C4 image with C4 prior.
void cuFilterBilateral(const iu::ImageGpu_32f_C4* src, iu::ImageGpu_32f_C4* dst, const IuRect& roi,
const iu::ImageGpu_32f_C4* prior, const int iters,
const float sigma_spatial, const float sigma_range,
const int radius)
{
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(roi.width, dimBlock.x), iu::divUp(roi.height, dimBlock.y));
// filter iterations
for (int iter=0; iter<iters; ++iter)
{
cuFilterBilateralKernel_32f_C4
<<< dimGrid, dimBlock >>> (src->data(), dst->data(), prior->data(),
sigma_spatial, sigma_range, radius,
src->stride(), roi.x, roi.y, roi.width, roi.height);
}
IU_CUDA_CHECK();
}
} // namespace cu
} // namespace imp
#endif  // IMP_CU_BILATERAL_IMPL_CU
|
5e624d3684ec1390c19a2a6ea462c52abf51ec63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define THREADS_PER_BLOCK 64
// Computes Pd = Md * Nd for square WIDTH x WIDTH row-major matrices.
// One thread per output element; launch with a 2D grid covering WIDTH x WIDTH.
// FIX: the original accumulated with "Pd[...] +=" every iteration, which both
// re-read/re-wrote global memory in the inner loop and folded in whatever
// (possibly uninitialized) value Pd already held. Accumulate in a register
// and write the final result once.
__global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH )
{
    int COL = threadIdx.x + blockIdx.x * blockDim.x;
    int ROW = threadIdx.y + blockIdx.y * blockDim.y;
    if (ROW < WIDTH && COL < WIDTH) {
        float sum = 0.0f;
        for (int i = 0; i < WIDTH; i++) {
            sum += Md[ROW * WIDTH + i] * Nd[i * WIDTH + COL];
        }
        Pd[ROW * WIDTH + COL] = sum;
    }
}
// Times a WIDTH x WIDTH matrix multiplication on the GPU and prints the
// elapsed kernel time in seconds. Usage: <prog> WIDTH
// Fixes vs. original: arg1[1] was dereferenced without checking arg0; c_h was
// uninitialized malloc memory copied into c_d that the kernel then
// accumulated into ("+=") — it is now zero-initialized; the unused d_h
// buffer is removed; size uses size_t to avoid int overflow for large WIDTH.
int main(int arg0, char *arg1[]) {
    hipDeviceSynchronize(); /* create the device context before timing */
    if (arg0 < 2) {
        printf("usage: %s WIDTH\n", arg1[0]);
        return 1;
    }
    int WIDTH = 0;
    /* sscanf instead of atoi keeps us within the headers already included */
    if (sscanf(arg1[1], "%d", &WIDTH) != 1 || WIDTH <= 0) {
        printf("invalid WIDTH: %s\n", arg1[1]);
        return 1;
    }
    int sqrtThreads = sqrt(THREADS_PER_BLOCK);
    int nBlocks = WIDTH / sqrtThreads;
    if (WIDTH % sqrtThreads != 0) {
        nBlocks++; /* ceil-div so the grid covers a non-multiple WIDTH */
    }
    dim3 grid(nBlocks, nBlocks, 1);
    dim3 block(sqrtThreads, sqrtThreads, 1);

    float *a_h, *b_h, *c_h, *a_d, *b_d, *c_d;
    size_t size = (size_t)WIDTH * WIDTH * sizeof(float);
    hipEvent_t start;
    hipEvent_t stop;
    float elapsed1;

    a_h = (float*) malloc(size);
    b_h = (float*) malloc(size);
    /* calloc: C must start at zero since the kernel accumulates with "+=" */
    c_h = (float*) calloc((size_t)WIDTH * WIDTH, sizeof(float));
    for (int i = 0; i < WIDTH; i++)
    {
        for (int j = 0; j < WIDTH; j++)
        {
            a_h[i * WIDTH + j] = i;
            b_h[i * WIDTH + j] = i;
        }
    }

    hipMalloc((void**)&a_d, size);
    hipMalloc((void**)&b_d, size);
    hipMalloc((void**)&c_d, size);
    hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);
    hipMemcpy(c_d, c_h, size, hipMemcpyHostToDevice);

    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( MatrixMul), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, WIDTH);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed1, start, stop);
    printf("%f\n", elapsed1/1000); /* event time is in ms; print seconds */

    hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);

    free(a_h);
    free(b_h);
    free(c_h);
    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 5e624d3684ec1390c19a2a6ea462c52abf51ec63.cu | #include <stdio.h>
#include <math.h>
#define THREADS_PER_BLOCK 64
// Computes Pd = Md * Nd for square WIDTH x WIDTH row-major matrices.
// One thread per output element; launch with a 2D grid covering WIDTH x WIDTH.
// FIX: the original accumulated with "Pd[...] +=" every iteration, which both
// re-read/re-wrote global memory in the inner loop and folded in whatever
// (possibly uninitialized) value Pd already held. Accumulate in a register
// and write the final result once.
__global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH )
{
    int COL = threadIdx.x + blockIdx.x * blockDim.x;
    int ROW = threadIdx.y + blockIdx.y * blockDim.y;
    if (ROW < WIDTH && COL < WIDTH) {
        float sum = 0.0f;
        for (int i = 0; i < WIDTH; i++) {
            sum += Md[ROW * WIDTH + i] * Nd[i * WIDTH + COL];
        }
        Pd[ROW * WIDTH + COL] = sum;
    }
}
// Times a WIDTH x WIDTH matrix multiplication on the GPU and prints the
// elapsed kernel time in seconds. Usage: <prog> WIDTH
// Fixes vs. original: deprecated cudaThreadSynchronize replaced with
// cudaDeviceSynchronize; arg1[1] was dereferenced without checking arg0; c_h
// was uninitialized malloc memory copied into c_d that the kernel then
// accumulated into ("+=") — it is now zero-initialized; the unused d_h
// buffer is removed; size uses size_t to avoid int overflow for large WIDTH.
int main(int arg0, char *arg1[]) {
    cudaDeviceSynchronize(); /* create the device context before timing */
    if (arg0 < 2) {
        printf("usage: %s WIDTH\n", arg1[0]);
        return 1;
    }
    int WIDTH = 0;
    /* sscanf instead of atoi keeps us within the headers already included */
    if (sscanf(arg1[1], "%d", &WIDTH) != 1 || WIDTH <= 0) {
        printf("invalid WIDTH: %s\n", arg1[1]);
        return 1;
    }
    int sqrtThreads = sqrt(THREADS_PER_BLOCK);
    int nBlocks = WIDTH / sqrtThreads;
    if (WIDTH % sqrtThreads != 0) {
        nBlocks++; /* ceil-div so the grid covers a non-multiple WIDTH */
    }
    dim3 grid(nBlocks, nBlocks, 1);
    dim3 block(sqrtThreads, sqrtThreads, 1);

    float *a_h, *b_h, *c_h, *a_d, *b_d, *c_d;
    size_t size = (size_t)WIDTH * WIDTH * sizeof(float);
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsed1;

    a_h = (float*) malloc(size);
    b_h = (float*) malloc(size);
    /* calloc: C must start at zero since the kernel accumulates with "+=" */
    c_h = (float*) calloc((size_t)WIDTH * WIDTH, sizeof(float));
    for (int i = 0; i < WIDTH; i++)
    {
        for (int j = 0; j < WIDTH; j++)
        {
            a_h[i * WIDTH + j] = i;
            b_h[i * WIDTH + j] = i;
        }
    }

    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    MatrixMul<<<grid, block>>>(a_d, b_d, c_d, WIDTH);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed1, start, stop);
    printf("%f\n", elapsed1/1000); /* event time is in ms; print seconds */

    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    free(a_h);
    free(b_h);
    free(c_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
53ae9df12c362581f8c76d0e429e9b5b926320c1.hip | // !!! This is a file automatically generated by hipify!!!
#if 0
#######################################################################################
# The MIT License
# Copyright (c) 2014 Hannes Schulz, University of Bonn <schulz@ais.uni-bonn.de>
# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <mail@bwaldvogel.de>
# Copyright (c) 2008-2009 Sebastian Nowozin <nowozin@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######################################################################################
#endif
#include "random_tree_image_gpu.h"
#include <boost/format.hpp>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand_kernel.h>
#include <set>
#include <tbb/mutex.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "random_tree_image.h"
#include "score.h"
#include "utils.h"
namespace curfil {
// must be global
texture<float, hipTextureType2DLayered, hipReadModeElementType> colorTexture;
texture<int, hipTextureType2DLayered, hipReadModeElementType> depthTexture;
texture<float, hipTextureType2DLayered, hipReadModeElementType> treeTexture;
tbb::mutex initMutex;
volatile bool initialized = false;
tbb::mutex textureMutex;
const static int NUM_STREAMS = 2;
hipStream_t streams[NUM_STREAMS] = { NULL, NULL };
ImageCache imageCache;
TreeCache treeCache;
// Fetches one color channel of pixel (x, y) of image `imageNr` from the
// layered color texture; the channels of an image occupy consecutive layers.
__device__
float getColorChannelValue(int x, int y, int imageNr, int channel) {
assert(channel < colorChannels);
return tex2DLayered(colorTexture, x, y, imageNr * colorChannels + channel);
}
// Fetches the depth channel of pixel (x, y) of image `imageNr` from the
// layered depth texture.
__device__
int getDepthValue(int x, int y, int imageNr) {
return tex2DLayered(depthTexture, x, y, imageNr * depthChannels + depthChannel);
}
// Fetches the depth-validity channel of pixel (x, y) of image `imageNr`
// (used below to count valid depth samples in a rectangle).
__device__
int getDepthValidValue(int x, int y, int imageNr) {
return tex2DLayered(depthTexture, x, y, imageNr * depthChannels + depthValidChannel);
}
// Average depth over the rectangle [leftX,rightX] x [upperY,lowerY].
// The corner-difference arithmetic below is integral-image style, i.e. the
// depth/valid textures appear to hold summed-area values — TODO confirm
// against the texture-upload code. Returns NaN if the rectangle leaves the
// image or contains no valid depth sample.
__device__
FeatureResponseType averageRegionDepth(int imageNr,
const int16_t imageWidth, const int16_t imageHeight,
int leftX, int rightX, int upperY, int lowerY) {
if (leftX < 0 || rightX >= imageWidth || upperY < 0 || lowerY >= imageHeight) {
return nan("");
}
// Number of valid depth pixels in the rectangle via the four corners.
int upperLeftValid = getDepthValidValue(leftX, upperY, imageNr);
int upperRightValid = getDepthValidValue(rightX, upperY, imageNr);
int lowerRightValid = getDepthValidValue(rightX, lowerY, imageNr);
int lowerLeftValid = getDepthValidValue(leftX, lowerY, imageNr);
int numValid = (lowerRightValid - upperRightValid) + (upperLeftValid - lowerLeftValid);
assert(numValid >= 0 && numValid <= (rightX - leftX) * (lowerY - upperY));
if (numValid == 0) {
return nan("");
}
// Depth sum in the rectangle via the four corners.
int upperLeftDepth = getDepthValue(leftX, upperY, imageNr);
int upperRightDepth = getDepthValue(rightX, upperY, imageNr);
int lowerRightDepth = getDepthValue(rightX, lowerY, imageNr);
int lowerLeftDepth = getDepthValue(leftX, lowerY, imageNr);
int sum = (lowerRightDepth - upperRightDepth) + (upperLeftDepth - lowerLeftDepth);
// Division by 1000 converts the stored integer depth — presumably
// millimeters to meters; TODO confirm units.
FeatureResponseType feat = sum / static_cast<FeatureResponseType>(1000);
return (feat / numValid);
}
// Depth-scaled overload: positions a region of size (regionWidth,regionHeight)
// at offset (offsetX,offsetY) from the sample pixel, shrinking both the offset
// and the extent by the sample's depth so features are depth-invariant, then
// delegates to the rectangle overload above.
__device__
FeatureResponseType averageRegionDepth(int imageNr,
const int16_t imageWidth, const int16_t imageHeight,
float depth,
int sampleX, int sampleY,
int offsetX, int offsetY,
int regionWidth, int regionHeight) {
// max(1, ...): a region never collapses below one pixel half-extent.
int width = max(1, static_cast<int>(regionWidth / depth));
int height = max(1, static_cast<int>(regionHeight / depth));
int x = sampleX + static_cast<int>(offsetX / depth);
int y = sampleY + static_cast<int>(offsetY / depth);
int leftX = x - width;
int rightX = x + width;
int upperY = y - height;
int lowerY = y + height;
return averageRegionDepth(imageNr, imageWidth, imageHeight, leftX, rightX, upperY, lowerY);
}
// Corner-difference sum of one color channel over a depth-scaled region
// around the sample pixel (same integral-image-style arithmetic as
// averageRegionDepth — TODO confirm the color texture holds summed-area
// values). Returns NaN if the region leaves the image or any corner is NaN.
// NOTE(review): unlike the depth version, the result is NOT divided by the
// pixel count despite the name — confirm whether that is intended.
__device__
FeatureResponseType averageRegionColor(int imageNr,
uint16_t imageWidth, uint16_t imageHeight,
int channel, float depth,
int sampleX, int sampleY,
int offsetX, int offsetY,
int regionWidth, int regionHeight) {
// Scale offset and extent by depth so features are depth-invariant.
int width = max(1, static_cast<int>(regionWidth / depth));
int height = max(1, static_cast<int>(regionHeight / depth));
int x = sampleX + static_cast<int>(offsetX / depth);
int y = sampleY + static_cast<int>(offsetY / depth);
int leftX = x - width;
int rightX = x + width;
int upperY = y - height;
int lowerY = y + height;
if (leftX < 0 || rightX >= imageWidth || upperY < 0 || lowerY >= imageHeight) {
return nan("");
}
FeatureResponseType upperLeftPixel = getColorChannelValue(leftX, upperY, imageNr, channel);
FeatureResponseType upperRightPixel = getColorChannelValue(rightX, upperY, imageNr, channel);
FeatureResponseType lowerRightPixel = getColorChannelValue(rightX, lowerY, imageNr, channel);
FeatureResponseType lowerLeftPixel = getColorChannelValue(leftX, lowerY, imageNr, channel);
if (isnan(lowerRightPixel) || isnan(lowerLeftPixel) || isnan(upperRightPixel) || isnan(upperLeftPixel))
return nan("");
FeatureResponseType sum = (lowerRightPixel - upperRightPixel) + (upperLeftPixel - lowerLeftPixel);
return sum;
}
// Depth feature response: difference of the average depths of two
// depth-scaled regions around the sample pixel. NaN from either region
// (out of image / no valid depth) propagates to the caller.
__device__
FeatureResponseType calculateDepthFeature(int imageNr,
int16_t imageWidth, int16_t imageHeight,
int8_t offset1X, int8_t offset1Y,
int8_t offset2X, int8_t offset2Y,
int8_t region1X, int8_t region1Y,
int8_t region2X, int8_t region2Y,
int sampleX, int sampleY, float depth) {
FeatureResponseType a = averageRegionDepth(imageNr, imageWidth, imageHeight, depth, sampleX, sampleY, offset1X,
offset1Y, region1X, region1Y);
if (isnan(a))
return a;
FeatureResponseType b = averageRegionDepth(imageNr, imageWidth, imageHeight, depth, sampleX, sampleY, offset2X,
offset2Y, region2X, region2Y);
if (isnan(b))
return b;
return (a - b);
}
// Color feature response: difference of the region color sums of two
// depth-scaled regions, each evaluated on its own color channel.
// NaN from either region propagates to the caller.
__device__
FeatureResponseType calculateColorFeature(int imageNr,
const int16_t imageWidth, const int16_t imageHeight,
int8_t offset1X, int8_t offset1Y,
int8_t offset2X, int8_t offset2Y,
int8_t region1X, int8_t region1Y,
int8_t region2X, int8_t region2Y,
int8_t channel1, int8_t channel2,
int sampleX, int sampleY, float depth) {
assert(channel1 >= 0 && channel1 < 3);
assert(channel2 >= 0 && channel2 < 3);
FeatureResponseType a = averageRegionColor(imageNr, imageWidth, imageHeight, channel1, depth, sampleX, sampleY,
offset1X, offset1Y, region1X, region1Y);
if (isnan(a))
return a;
FeatureResponseType b = averageRegionColor(imageNr, imageWidth, imageHeight, channel2, depth, sampleX, sampleY,
offset2X, offset2Y, region2X, region2Y);
if (isnan(b))
return b;
return (a - b);
}
// Initializes one curand state per feature: same seed, per-thread sequence
// number, zero offset — independent streams that are reproducible per seed.
__global__
void setupRandomStatesKernel(unsigned long long seed, hiprandState_t* state, unsigned int numFeatures) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < numFeatures) {
/* Each thread gets same seed, a different sequence number, no offset */
hiprand_init(seed, id, 0, &state[id]);
}
}
// Draws a random 2D offset; each component is a float in
// (-boxRadius-1, boxRadius+1] (hiprand_uniform returns (0, 1]) truncated
// toward zero into an int8_t, so the result lies in [-radius-1, radius+1].
__device__
void randomOffset(hiprandState_t* state, int8_t* x, int8_t* y, const uint8_t radius) {
const uint8_t boxRadius = radius + 1;
const int8_t vx = hiprand_uniform(state) * 2 * boxRadius - boxRadius;
const int8_t vy = hiprand_uniform(state) * 2 * boxRadius - boxRadius;
assert(vx >= -boxRadius && vx <= boxRadius);
assert(vy >= -boxRadius && vy <= boxRadius);
*x = vx;
*y = vy;
}
// Draws a random region extent; each component lies in [1, regionSize]
// after truncation (hiprand_uniform returns (0, 1]).
__device__
void randomRegion(hiprandState_t* state, int8_t* x, int8_t* y, const uint8_t regionSize) {
const int8_t vx = hiprand_uniform(state) * regionSize + 1;
const int8_t vy = hiprand_uniform(state) * regionSize + 1;
assert(vx >= 1 && vx <= regionSize);
assert(vy >= 1 && vy <= regionSize);
*x = vx;
*y = vy;
}
// One thread per candidate feature: draws a random feature (type, offsets,
// regions, channels), evaluates it on `numThresholds` randomly chosen
// training samples to obtain candidate thresholds, and writes the feature
// parameters plus a packed sort key (type/channels/offset1) used to group
// similar features for coherent evaluation later. `sampleLabel` is accepted
// for interface compatibility but unused here.
// FIX: the original contained HTML-entity-corrupted tokens "®ion1X" etc.
// ("&reg" mangled to the (R) sign) in the randomRegion calls, which does not
// compile; restored to "&region1X" / "&region1Y" / "&region2X" / "&region2Y".
__global__
void generateRandomFeaturesKernel(int seed,
        unsigned int numFeatures,
        int* keys, int* indices,
        uint16_t boxRadius,
        uint16_t regionSize,
        int8_t* types,
        int8_t* offsets1X, int8_t* offsets1Y,
        int8_t* regions1X, int8_t* regions1Y,
        int8_t* offsets2X, int8_t* offsets2Y,
        int8_t* regions2X, int8_t* regions2Y,
        int8_t* channels1, int8_t* channels2,
        float* thresholds,
        unsigned int numThresholds,
        unsigned int numSamples,
        int imageWidth, int imageHeight,
        int* imageNumbers,
        float* depths,
        int* sampleX,
        int* sampleY,
        uint8_t* sampleLabel,
        bool isUseDepthImages) {

    int feat = blockIdx.x * blockDim.x + threadIdx.x;
    if (feat >= numFeatures) {
        return;
    }

    hiprandState_t localState;
    hiprand_init(seed, feat, 0, &localState);

    uint8_t type;
    int8_t offset1X, offset1Y;
    int8_t offset2X, offset2Y;
    int8_t region1X, region1Y;
    int8_t region2X, region2Y;
    uint8_t channel1, channel2;

    // With depth images: first half of the features are COLOR, second half
    // DEPTH. Without depth images: all COLOR.
    if (isUseDepthImages)
        type = static_cast<uint8_t>(feat >= numFeatures / 2);
    else
        type = COLOR;
    types[feat] = type;

    randomOffset(&localState, &offset1X, &offset1Y, boxRadius);
    randomRegion(&localState, &region1X, &region1Y, regionSize);
    do {
        // Re-draw until the two probe offsets differ.
        randomOffset(&localState, &offset2X, &offset2Y, boxRadius);
        randomRegion(&localState, &region2X, &region2Y, regionSize);
    } while (offset1X == offset2X && offset1Y == offset2Y);

    if (type == COLOR) {
        // Deterministic channel assignment derived from the feature index so
        // channel combinations are spread evenly over the feature range.
        // chan1=MOD(INT(A2/(100/2)*3);3)
        // chan2=MOD(INT(A2/(100/2/3)*3);3)
        if (isUseDepthImages) {
            channel1 = feat / (numFeatures / 2.0) * 3;
            channel1 %= 3;
            channel2 = feat / (numFeatures / 2.0 / 3) * 3;
            channel2 %= 3;
        }
        else
        {
            channel1 = feat / (numFeatures / 1.0) * 3;
            channel1 %= 3;
            channel2 = feat / (numFeatures / 1.0 / 3) * 3;
            channel2 %= 3;
        }
        // channel1 = hiprand_uniform(&localState) * 3;
        // channel2 = hiprand_uniform(&localState) * 3;
    } else {
        channel1 = 0;
        channel2 = 0;
    }

    // Candidate thresholds = feature responses on random training samples.
    for (unsigned int thresh = 0; thresh < numThresholds; thresh++) {
        unsigned int numSample = hiprand_uniform(&localState) * (numSamples - 1);
        FeatureResponseType featureResponse;
        switch (type) {
            case COLOR:
                featureResponse = calculateColorFeature(imageNumbers[numSample],
                        imageWidth, imageHeight,
                        offset1X, offset1Y,
                        offset2X, offset2Y,
                        region1X, region1Y,
                        region2X, region2Y,
                        channel1, channel2,
                        sampleX[numSample], sampleY[numSample], depths[numSample]);
                break;
            case DEPTH:
                featureResponse = calculateDepthFeature(imageNumbers[numSample],
                        imageWidth, imageHeight,
                        offset1X, offset1Y,
                        offset2X, offset2Y,
                        region1X, region1Y,
                        region2X, region2Y,
                        sampleX[numSample], sampleY[numSample], depths[numSample]);
                break;
            default:
                assert(false);
                break;
        }
        if (isnan(featureResponse)) {
            featureResponse = 0.0;
        }
        // Column-major layout: thresholds of one candidate index are
        // contiguous across features.
        thresholds[thresh * numFeatures + feat] = featureResponse;
    }

    // Pack a non-negative sort key so features with the same type/channels
    // and nearby first offsets end up adjacent after sorting.
    int32_t sortKey = 0;
    sortKey |= static_cast<uint8_t>(type & 0x03) << 30;              // 2 bit for the type
    sortKey |= static_cast<uint8_t>(channel1 & 0x0F) << 26;          // 4 bit for channel1
    sortKey |= static_cast<uint8_t>(channel2 & 0x0F) << 22;          // 4 bit for channel2
    sortKey |= static_cast<uint8_t>((offset1Y + 127) & 0xFF) << 14;  // 8 bit for offset1.y
    sortKey |= static_cast<uint8_t>((offset1X + 127) & 0xFF) << 6;   // 8 bit for offset1.x
    keys[feat] = sortKey;
    assert(keys[feat] >= 0);

    indices[feat] = feat;
    offsets1X[feat] = offset1X;
    offsets1Y[feat] = offset1Y;
    regions1X[feat] = region1X;
    regions1Y[feat] = region1Y;
    offsets2X[feat] = offset2X;
    offsets2Y[feat] = offset2Y;
    regions2X[feat] = region2X;
    regions2Y[feat] = region2Y;
    channels1[feat] = channel1;
    channels2[feat] = channel2;
}
// Stages the per-sample training data (image cache slot, depth, position,
// label, flip setting) in a host-side Samples buffer and copies it to the
// device. Also ensures every referenced image is resident in the device
// image cache first, so getElementPos() below succeeds.
Samples<cuv::dev_memory_space> ImageFeatureEvaluation::copySamplesToDevice(
        const std::vector<const PixelInstance*>& samples, hipStream_t stream) {

    imageCache.copyImages(configuration.getImageCacheSize(), samples);

    // image transfers run on 'stream'; wait before recording slot positions
    cudaSafeCall(hipStreamSynchronize(stream));

    utils::Profile p("copySamplesToDevice");

    Samples<cuv::host_memory_space> samplesOnHost(samples.size(), sampleDataAllocator);

    for (size_t i = 0; i < samples.size(); i++) {
        const PixelInstance* sample = samples[i];
        // imageNumbers holds the image's slot index in the device cache
        samplesOnHost.imageNumbers[i] = imageCache.getElementPos(sample->getRGBDImage());
        samplesOnHost.depths[i] = sample->getDepth().getFloatValue();
        samplesOnHost.sampleX[i] = sample->getX();
        samplesOnHost.sampleY[i] = sample->getY();
        samplesOnHost.labels[i] = sample->getLabel();
        samplesOnHost.horFlipSetting[i] = sample->getHorFlipSetting();
    }

    // NOTE(review): timer is constructed but never read — presumably a
    // leftover from profiling; confirm before removing.
    utils::Timer copySamplesAssignTimer;

    // host->device copy happens inside the Samples copy constructor
    Samples<cuv::dev_memory_space> samplesOnDevice(samplesOnHost, stream);
    cudaSafeCall(hipStreamSynchronize(stream));

    return samplesOnDevice;
}
// Drops every image from the global device image cache (logs first so the
// eviction shows up in the output even if clear() fails).
void clearImageCache() {
    CURFIL_INFO("clearing image cache");
    imageCache.clear();
}
// Copy constructor. m_data is copied via cuv::ndarray's copy semantics —
// NOTE(review): cuv arrays typically share the underlying buffer, so the
// copy presumably aliases 'other'; confirm if deep-copy semantics are needed.
TreeNodes::TreeNodes(const TreeNodes& other) :
        m_treeId(other.getTreeId()),
        m_numNodes(other.numNodes()),
        m_numLabels(other.numLabels()),
        m_sizePerNode(other.sizePerNode()),
        m_data(other.data())
{
}
// Flattens a trained random tree into the fixed texture layout:
// LAYERS_PER_TREE x NODES_PER_TREE_LAYER node records, each of
// m_sizePerNode bytes (fixed node fields followed by the label histogram).
TreeNodes::TreeNodes(const boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> >& tree) :
        m_treeId(tree->getTreeId()),
        m_numNodes(tree->countNodes()),
        m_numLabels(tree->getNumClasses()),
        m_sizePerNode(offsetHistograms + sizeof(float) * m_numLabels),
        m_data(LAYERS_PER_TREE * NODES_PER_TREE_LAYER, m_sizePerNode)
{
    // the histogram starts after six fixed 4-byte fields (see the setters)
    assert(offsetHistograms == 24);

    const unsigned int MAX_NODES = LAYERS_PER_TREE * NODES_PER_TREE_LAYER;
    if (m_numNodes > MAX_NODES) {
        throw std::runtime_error((boost::format("too many nodes in tree %d: %d (max: %d)")
                % tree->getTreeId() % m_numNodes % MAX_NODES).str());
    }

    convert(tree);
    assert(m_numLabels == tree->getHistogram().size());
}
// Writes 'value' into the raw node buffer at byte position 'offset' within
// the record of 'node'. Records are stored layer-major: the node index is
// split into (layer, position-within-layer) and each record occupies
// m_sizePerNode bytes. Throws if the node would fall outside the last layer.
template<class T>
void TreeNodes::setValue(size_t node, size_t offset, const T& value) {

    const size_t layer = node / NODES_PER_TREE_LAYER;
    const size_t nodeOffset = node % NODES_PER_TREE_LAYER;
    assert(layer * NODES_PER_TREE_LAYER + nodeOffset == node);

    if (layer >= LAYERS_PER_TREE) {
        throw std::runtime_error((boost::format("illegal layer: %d (node: %d)")
                % layer % node).str());
    }

    // byte-level addressing into the flat buffer, then reinterpret as T
    T* ptr = reinterpret_cast<T*>(m_data.ptr()
            + layer * NODES_PER_TREE_LAYER * m_sizePerNode
            + nodeOffset * m_sizePerNode + offset);

    // CURFIL_DEBUG("setting value for node " << node << " at pos (" << layer << "," << nodeOffset << "," << offset << ") to " << static_cast<double>(value));

    *ptr = value;
}
// --- Field setters for the flat node record ---------------------------------
// Record layout (byte offsets): leftNode (int), type (int), the eight packed
// int8 feature parameters, the two uint16 channels, threshold (float), then
// the per-label histogram floats starting at offsetHistograms.

// Relative index of the left child; -1 marks a leaf.
void TreeNodes::setLeftNodeOffset(size_t node, int offset) {
    setValue(node, offsetLeftNode, offset);
}

// Split threshold; NaN for leaf nodes.
void TreeNodes::setThreshold(size_t node, float threshold) {
    setValue(node, offsetThreshold, threshold);
}

// One histogram bin (normalized label probability) of the node.
void TreeNodes::setHistogramValue(size_t node, size_t label, float value) {
    assert(offsetHistograms == 6 * sizeof(float));
    setValue(node, offsetHistograms + label * sizeof(value), value);
}

// Feature type (COLOR/DEPTH), widened to int so it fills a 4-byte word.
void TreeNodes::setType(size_t node, int8_t value) {
    setValue(node, offsetTypes, static_cast<int>(value));
}

// The eight int8 feature parameters are packed back-to-back starting at
// offsetFeatures, in the order read back by getParam1()/getParam2().
void TreeNodes::setOffset1X(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 0 * sizeof(value), value);
}

void TreeNodes::setOffset1Y(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 1 * sizeof(value), value);
}

void TreeNodes::setRegion1X(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 2 * sizeof(value), value);
}

void TreeNodes::setRegion1Y(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 3 * sizeof(value), value);
}

void TreeNodes::setOffset2X(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 4 * sizeof(value), value);
}

void TreeNodes::setOffset2Y(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 5 * sizeof(value), value);
}

void TreeNodes::setRegion2X(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 6 * sizeof(value), value);
}

void TreeNodes::setRegion2Y(size_t node, int8_t value) {
    setValue(node, offsetFeatures + 7 * sizeof(value), value);
}

// The two uint16 channel selectors, packed at offsetChannels.
void TreeNodes::setChannel1(size_t node, uint16_t value) {
    setValue(node, offsetChannels + 0 * sizeof(value), value);
}

void TreeNodes::setChannel2(size_t node, uint16_t value) {
    setValue(node, offsetChannels + 1 * sizeof(value), value);
}
// Recursively serializes the (sub)tree rooted at 'tree' into the flat node
// buffer. The record index is the node id relative to the tree's root id.
// Children are encoded as a relative left-node offset; the right child is
// implicitly left + 1, which relies on breadth-first node numbering
// (checked by the assert near the end).
void TreeNodes::convert(const boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> >& tree) {

    size_t offset = tree->getNodeId() - tree->getTreeId();
    if (offset >= m_numNodes) {
        throw std::runtime_error((boost::format("tree %d, illegal offset: %d (numNodes: %d)")
                % tree->getTreeId() % offset % m_numNodes).str());
    }

    // could be limited to the leaf-node case
    const cuv::ndarray<double, cuv::host_memory_space>& histogram = tree->getNormalizedHistogram();
    assert(histogram.ndim() == 1);
    assert(histogram.shape(0) == m_numLabels);
    for (size_t label = 0; label < histogram.shape(0); label++) {
        setHistogramValue(offset, label, static_cast<float>(histogram(label)));
    }

    if (tree->isLeaf()) {
        // leaves are marked with a -1 child offset and a NaN threshold
        setLeftNodeOffset(offset, -1);
        setThreshold(offset, std::numeric_limits<float>::quiet_NaN());
        return;
    }

    // decision node
    const ImageFeatureFunction& feature = tree->getSplit().getFeature();

    setType(offset, static_cast<int8_t>(feature.getType()));

    setOffset1X(offset, static_cast<int8_t>(feature.getOffset1().getX()));
    setOffset1Y(offset, static_cast<int8_t>(feature.getOffset1().getY()));
    setRegion1X(offset, static_cast<int8_t>(feature.getRegion1().getX()));
    setRegion1Y(offset, static_cast<int8_t>(feature.getRegion1().getY()));

    setOffset2X(offset, static_cast<int8_t>(feature.getOffset2().getX()));
    setOffset2Y(offset, static_cast<int8_t>(feature.getOffset2().getY()));
    setRegion2X(offset, static_cast<int8_t>(feature.getRegion2().getX()));
    setRegion2Y(offset, static_cast<int8_t>(feature.getRegion2().getY()));

    setChannel1(offset, static_cast<int8_t>(feature.getChannel1()));
    setChannel2(offset, static_cast<int8_t>(feature.getChannel2()));

    setThreshold(offset, tree->getSplit().getThreshold());

    convert(tree->getLeft());
    convert(tree->getRight());

    // tree nodes must be already in breadth-first order
    assert(tree->getRight()->getNodeId() == tree->getLeft()->getNodeId() + 1);

    const int leftNodeOffset = tree->getLeft()->getNodeId() - tree->getNodeId();
    assert(leftNodeOffset > 0);
    setLeftNodeOffset(offset, leftNodeOffset);
}
// Flips the texture-bound flag; the assert guards against redundant
// bind()/unbind() transitions.
void DeviceCache::setBound(bool bound) {
    assert(bound != this->bound);
    this->bound = bound;
}
// Base-class destructor only verifies that the cache was already torn down.
// clear() must be called in destructor of derived class — presumably
// because unbind()/freeArray() are virtual and no longer dispatch to the
// derived class here.
DeviceCache::~DeviceCache() {
    assert(!bound);
    assert(elementTimes.empty());
    assert(elementIdMap.empty());
    assert(currentTime == 0);
}
// Returns true when the given element currently occupies a cache slot.
bool DeviceCache::containsElement(const void* element) const {
    assert(element);
    return elementIdMap.count(element) != 0;
}
// Looks up the cache slot of an element; throws if it was never transferred.
size_t DeviceCache::getElementPos(const void* element) const {
    assert(element);
    std::map<const void*, size_t>::const_iterator pos = elementIdMap.find(element);
    if (pos != elementIdMap.end()) {
        return pos->second;
    }
    throw std::runtime_error(getElementName(element) + " not found in cache");
}
// Drops all cached elements and resets the LRU clock. The texture must be
// unbound before the backing arrays are freed, hence the ordering below.
void DeviceCache::clear() {
    if (bound) {
        unbind();
    }
    freeArray();
    elementTimes.clear();
    elementIdMap.clear();
    currentTime = 0;
}
// Ensures every element in 'elements' is resident in the device cache.
// Elements already present only get their LRU timestamp refreshed; missing
// ones are transferred, evicting the least-recently-used entry when the
// cache is full. Rebinds the texture at the end if any transfer unbound it.
// Throws if the requested set is larger than the cache itself.
void DeviceCache::copyElements(size_t cacheSize, const std::set<const void*>& elements) {

    if (elements.empty())
        return;

    if (elements.size() > cacheSize) {
        throw std::runtime_error(boost::str(boost::format("too many images: %d. max: %d")
                % elements.size()
                % cacheSize));
    }

    // a cache-size change invalidates all slots
    if (cacheSize != this->cacheSize) {
        clear();
        this->cacheSize = cacheSize;
    }

    // one logical tick per batch: all elements touched now share this time
    currentTime++;

    size_t numTransferred = 0;

    const boost::posix_time::ptime start = boost::posix_time::microsec_clock::local_time();

    std::set<const void*>::const_iterator it;
    for (it = elements.begin(); it != elements.end(); it++) {
        const void* element = *it;
        size_t elementPos = 0;

        // check if image is already there
        if (elementIdMap.find(element) != elementIdMap.end()) {
            elementPos = elementIdMap[element];
            // update LRU vector time
            elementTimes[elementPos] = currentTime;
            CURFIL_DEBUG(getElementName(element) << " already in device cache");
            continue;
        }

        // element does not exist yet. transfer it
        CURFIL_DEBUG(getElementName(element) << " not yet on device. transferring");

        if (elementIdMap.size() < cacheSize) {
            // free slot available: append
            elementPos = elementIdMap.size();
        } else {
            // find least recently used element
            size_t oldestTime = currentTime;
            std::map<size_t, size_t>::const_iterator it;
            for (it = elementTimes.begin(); it != elementTimes.end(); it++) {
                if (it->second < oldestTime) {
                    oldestTime = it->second;
                    elementPos = it->first;
                }
            }
            assert(oldestTime < currentTime);

            CURFIL_DEBUG("replacing " << getElementName(element)
                    << " (time: " << oldestTime << ", current: " << currentTime << ")");

            // drop the evicted element's map entry (reverse lookup by slot)
            {
                std::map<const void*, size_t>::iterator it;
                for (it = elementIdMap.begin(); it != elementIdMap.end(); it++) {
                    if (it->second == elementPos) {
                        CURFIL_DEBUG("removing " << getElementName(it->first) << " at pos " << elementPos);
                        elementIdMap.erase(it);
                        break;
                    }
                }
            }
        }

        elementIdMap[element] = elementPos;
        elementTimes[elementPos] = currentTime;

        CURFIL_DEBUG("transfer " << getElementName(element) << " to pos " << elementPos);

        // textures cannot be written while bound
        if (bound) {
            unbind();
        }

        transferElement(elementPos, element, streams[0]);
        numTransferred++;
    }

    if (numTransferred > 0) {
        CURFIL_DEBUG("transferred " << numTransferred << "/" << elements.size() << " "
                << getElementsName() << " from host to device");
        cudaSafeCall(hipStreamSynchronize(streams[0]));
        const boost::posix_time::ptime stop = boost::posix_time::microsec_clock::local_time();
        totalTransferTimeMicroseconds += (stop - start).total_microseconds();
    }

    if (!bound) {
        bind();
    }
}
// Resizing invalidates all cached slots: clear first, then remember the
// new capacity. A no-op when the size is unchanged.
void DeviceCache::updateCacheSize(size_t cacheSize) {
    if (cacheSize != this->cacheSize) {
        clear();
        this->cacheSize = cacheSize;
    }
}
// Tears down the cache here (not in the base destructor) so the virtual
// unbind()/freeArray() overrides are still dispatched to ImageCache.
ImageCache::~ImageCache() {
    CURFIL_DEBUG("destroying image cache " << this);
    clear();
}
// Releases the layered color/depth texture arrays (if allocated).
// NOTE(review): the hipFreeArray() return codes are intentionally not
// wrapped in cudaSafeCall — presumably to keep destructor-driven teardown
// from throwing; confirm.
void ImageCache::freeArray() {
    assert(!isBound());
    if (colorTextureData != NULL) {
        hipFreeArray(colorTextureData);
        colorTextureData = NULL;
    }
    if (depthTextureData != NULL) {
        hipFreeArray(depthTextureData);
        depthTextureData = NULL;
    }
}
// Allocates the layered 3D arrays backing the color (float) and depth
// (signed int) textures. The z extent packs 'channels' consecutive layers
// per cache slot, so slot i starts at layer channels * i.
void ImageCache::allocArray() {

    assert(!isBound());

    unsigned int flags = hipArrayLayered;

    assert(colorTextureData == NULL);
    assert(depthTextureData == NULL);
    assert(this->width > 0);
    assert(this->height > 0);
    assert(getCacheSize() > 0);

    {
        hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
        hipExtent extent = make_hipExtent(width, height, colorChannels * getCacheSize());
        cudaSafeCall(hipMalloc3DArray(&colorTextureData, &channelDesc, extent, flags));
    }

    {
        hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindSigned);
        hipExtent extent = make_hipExtent(width, height, depthChannels * getCacheSize());
        cudaSafeCall(hipMalloc3DArray(&depthTextureData, &channelDesc, extent, flags));
    }
}
// Starts with no backing arrays; they are allocated lazily on first copy
// when image dimensions and cache size are known.
ImageCache::ImageCache() :
        DeviceCache(), width(0), height(0), colorTextureData(NULL), depthTextureData(NULL) {
}
// Collects the distinct RGBD images referenced by the samples and hands
// them to the set-based overload for transfer to the device cache.
void ImageCache::copyImages(size_t cacheSize, const std::vector<const PixelInstance*>& samples) {
    std::set<const RGBDImage*> images;
    std::vector<const PixelInstance*>::const_iterator sampleIt;
    for (sampleIt = samples.begin(); sampleIt != samples.end(); sampleIt++) {
        images.insert((*sampleIt)->getRGBDImage());
    }
    copyImages(cacheSize, images);
}
// Makes the given images resident in the device cache. All images must
// share the same dimensions (verified in debug builds); a dimension change
// clears the cache and triggers a lazy re-allocation of the texture arrays.
void ImageCache::copyImages(size_t cacheSize, const std::set<const RGBDImage*>& images) {

    if (images.empty())
        return;

    int width = (*images.begin())->getWidth();
    int height = (*images.begin())->getHeight();

#ifndef NDEBUG
    {
        std::set<const RGBDImage*>::const_iterator it;
        for (it = images.begin(); it != images.end(); it++) {
            assert(width == (*it)->getWidth());
            assert(height == (*it)->getHeight());
        }
    }
#endif

    if (width != this->width || height != this->height) {
        this->width = width;
        this->height = height;
        clear();
    }

    updateCacheSize(cacheSize);

    if (colorTextureData == NULL) {
        allocArray();
    }

    // widen to the type-erased element set used by the base-class LRU logic
    std::set<const void*> elements;
    std::set<const RGBDImage*>::const_iterator it;
    for (it = images.begin(); it != images.end(); it++) {
        elements.insert(*it);
    }

    copyElements(cacheSize, elements);
}
// Copies one RGBD image into its cache slot: the color planes go to the
// float texture array, the depth planes to the int texture array. Slot i
// occupies layers [channels*i, channels*(i+1)) of each layered array.
void ImageCache::transferElement(size_t imagePos, const void* imagePtr, hipStream_t stream) {

    const RGBDImage* image = reinterpret_cast<const RGBDImage*>(imagePtr);

    struct hipMemcpy3DParms colorCopyParams;
    // zero-initialize: unused hipMemcpy3D members must be 0
    memset(&colorCopyParams, 0, sizeof(colorCopyParams));
    colorCopyParams.extent = make_hipExtent(width, height, colorChannels);
    colorCopyParams.kind = hipMemcpyHostToDevice;
    colorCopyParams.dstArray = colorTextureData;

    struct hipMemcpy3DParms depthCopyParams;
    memset(&depthCopyParams, 0, sizeof(depthCopyParams));
    depthCopyParams.extent = make_hipExtent(width, height, depthChannels);
    depthCopyParams.kind = hipMemcpyHostToDevice;
    depthCopyParams.dstArray = depthTextureData;

    // host layout must be (channels, height, width), densely packed
    assert(image->getColorImage().ndim() == 3);
    assert(image->getColorImage().shape(0) == static_cast<unsigned int>(colorChannels));
    assert(image->getColorImage().shape(1) == static_cast<unsigned int>(height));
    assert(image->getColorImage().shape(2) == static_cast<unsigned int>(width));

    colorCopyParams.dstPos = make_hipPos(0, 0, colorChannels * imagePos);
    colorCopyParams.srcPtr = make_hipPitchedPtr(
            const_cast<void*>(reinterpret_cast<const void*>(image->getColorImage().ptr())),
            sizeof(float) * width, width, height);
    cudaSafeCall(hipMemcpy3DAsync(&colorCopyParams, stream));

    assert(image->getDepthImage().ndim() == 3);
    assert(image->getDepthImage().shape(0) == static_cast<unsigned int>(depthChannels));
    assert(image->getDepthImage().shape(1) == static_cast<unsigned int>(height));
    assert(image->getDepthImage().shape(2) == static_cast<unsigned int>(width));

    depthCopyParams.dstPos = make_hipPos(0, 0, depthChannels * imagePos);
    depthCopyParams.srcPtr = make_hipPitchedPtr(
            const_cast<void*>(reinterpret_cast<const void*>(image->getDepthImage().ptr())),
            sizeof(int) * width, width, height);
    cudaSafeCall(hipMemcpy3DAsync(&depthCopyParams, stream));
}
// Human-readable identifier of a cached image (its host pointer).
std::string ImageCache::getElementName(const void* imagePtr) const {
    const RGBDImage* image = reinterpret_cast<const RGBDImage*>(imagePtr);
    boost::format formatter("image %p");
    formatter % image;
    return formatter.str();
}
// Plural noun used in the base class's transfer log messages.
std::string ImageCache::getElementsName() const {
    return "images";
}
// Binds the color and depth texture references to the cached layered
// arrays: unnormalized coordinates, nearest-neighbour sampling, clamped
// addressing on all three dimensions.
void ImageCache::bind() {

    assert(!isBound());

    colorTexture.normalized = false;
    colorTexture.filterMode = hipFilterModePoint;
    colorTexture.addressMode[0] = hipAddressModeClamp;
    colorTexture.addressMode[1] = hipAddressModeClamp;
    colorTexture.addressMode[2] = hipAddressModeClamp;

    assert(colorTextureData != NULL);
    cudaSafeCall(hipBindTextureToArray(colorTexture, colorTextureData));

    depthTexture.normalized = false;
    depthTexture.filterMode = hipFilterModePoint;
    depthTexture.addressMode[0] = hipAddressModeClamp;
    depthTexture.addressMode[1] = hipAddressModeClamp;
    depthTexture.addressMode[2] = hipAddressModeClamp;

    assert(depthTextureData != NULL);
    cudaSafeCall(hipBindTextureToArray(depthTexture, depthTextureData));

    setBound(true);
}
// Releases both texture bindings. NOTE(review): unlike bind(), the
// hipUnbindTexture() return codes are not checked — presumably so the
// destructor-driven teardown path cannot throw; confirm.
void ImageCache::unbind() {
    assert(isBound());
    hipUnbindTexture(colorTexture);
    hipUnbindTexture(depthTexture);
    setBound(false);
}
// Tears down here (not in the base destructor) so the virtual
// unbind()/freeArray() overrides still dispatch to TreeCache.
TreeCache::~TreeCache() {
    CURFIL_DEBUG("destroying tree cache " << this);
    clear();
}
// Releases the layered tree texture array (if allocated). The return code
// is intentionally unchecked on this teardown path (cf. ImageCache::freeArray).
void TreeCache::freeArray() {
    assert(!isBound());
    if (treeTextureData != NULL) {
        hipFreeArray(treeTextureData);
        treeTextureData = NULL;
    }
}
// Allocates the layered float array holding the flattened tree nodes:
// x = floats per node record, y = nodes per layer, z = LAYERS_PER_TREE
// consecutive layers for each cached tree.
void TreeCache::allocArray() {
    assert(!isBound());
    assert(treeTextureData == NULL);
    assert(sizePerNode > 0);
    assert(numLabels > 0);
    assert(getCacheSize() > 0);

    CURFIL_INFO("tree cache: allocating " << getCacheSize() << " x " << LAYERS_PER_TREE << " x "
            << NODES_PER_TREE_LAYER << " x " << sizePerNode << " bytes");

    {
        hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
        hipExtent extent = make_hipExtent(sizePerNode / sizeof(float), NODES_PER_TREE_LAYER,
                LAYERS_PER_TREE * getCacheSize());
        cudaSafeCall(hipMalloc3DArray(&treeTextureData, &channelDesc, extent, hipArrayLayered));
    }
}
// Starts empty; the texture array is allocated lazily once node size and
// label count are known from the first tree.
TreeCache::TreeCache() :
        DeviceCache(), sizePerNode(0), numLabels(0),
        treeTextureData(NULL) {
}
// Convenience wrapper: routes a single tree through the set-based API.
void TreeCache::copyTree(size_t cacheSize, const TreeNodes* tree) {
    std::set<const TreeNodes*> singleTree;
    singleTree.insert(tree);
    copyTrees(cacheSize, singleTree);
}
// Makes the given trees resident in the device cache. All trees must share
// the same node record size and label count (verified below); a layout
// change clears the cache and triggers a lazy re-allocation.
void TreeCache::copyTrees(size_t cacheSize, const std::set<const TreeNodes*>& trees) {

    if (trees.empty())
        return;

    const size_t sizePerNode = (*trees.begin())->sizePerNode();
    const LabelType numLabels = (*trees.begin())->numLabels();

    {
        std::set<const TreeNodes*>::const_iterator it;
        for (it = trees.begin(); it != trees.end(); it++) {
            assert(sizePerNode == (*it)->sizePerNode());
            assert(numLabels == (*it)->numLabels());
        }
    }

    if (numLabels != this->numLabels || sizePerNode != this->sizePerNode) {
        this->numLabels = numLabels;
        this->sizePerNode = sizePerNode;
        clear();
    }

    updateCacheSize(cacheSize);

    if (treeTextureData == NULL) {
        allocArray();
    }

    // widen to the type-erased element set used by the base-class LRU logic
    std::set<const void*> elements;
    std::set<const TreeNodes*>::const_iterator it;
    for (it = trees.begin(); it != trees.end(); it++) {
        elements.insert(*it);
    }

    copyElements(cacheSize, elements);
}
// Copies the flattened node records of one tree into its slot of the
// layered tree texture array (slot i starts at layer i * LAYERS_PER_TREE).
// Only the layers that actually contain nodes are transferred.
// Fix: "memset(©Params, …)" and "hipMemcpy3DAsync(©Params, …)" were
// HTML-entity mojibake ("&copy" rendered as '©'); restored to &copyParams.
void TreeCache::transferElement(size_t elementPos, const void* element, hipStream_t stream) {

    assert(!isBound());

    utils::Profile profile("transferTree");

    const TreeNodes* tree = reinterpret_cast<const TreeNodes*>(element);

    struct hipMemcpy3DParms copyParams;
    // zero-initialize: unused hipMemcpy3D members must be 0
    memset(&copyParams, 0, sizeof(copyParams));
    copyParams.kind = hipMemcpyHostToDevice;
    copyParams.dstArray = treeTextureData;

    assert(elementPos < getCacheSize());

    const size_t layers = ceil(tree->numNodes() / static_cast<double>(NODES_PER_TREE_LAYER));
    assert(layers >= 1);
    assert(layers <= LAYERS_PER_TREE);

    copyParams.dstPos = make_hipPos(0, 0, elementPos * LAYERS_PER_TREE);
    copyParams.extent = make_hipExtent(sizePerNode / sizeof(float), NODES_PER_TREE_LAYER, layers);

    void* ptr = const_cast<void*>(reinterpret_cast<const void*>(tree->data().ptr()));

    CURFIL_INFO("transfer " << getElementName(element) << " to pos " << elementPos
            << " (layer " << elementPos * LAYERS_PER_TREE << ")"
            << " with " << tree->numNodes() << " nodes in " << layers << " layers");

    assert(tree->data().size() == sizePerNode * NODES_PER_TREE_LAYER * LAYERS_PER_TREE);

    copyParams.srcPtr = make_hipPitchedPtr(ptr, sizePerNode, sizePerNode / sizeof(float), NODES_PER_TREE_LAYER);
    cudaSafeCall(hipMemcpy3DAsync(&copyParams, stream));
}
// Human-readable identifier of a cached tree (tree id plus host pointer).
std::string TreeCache::getElementName(const void* element) const {
    const TreeNodes* tree = reinterpret_cast<const TreeNodes*>(element);
    boost::format formatter("tree %d (%p)");
    formatter % tree->getTreeId() % tree;
    return formatter.str();
}
// Plural noun used in the base class's transfer log messages.
std::string TreeCache::getElementsName() const {
    return "trees";
}
// Binds the tree texture reference to the cached layered array:
// unnormalized coordinates, nearest-neighbour sampling, clamped addressing.
void TreeCache::bind() {

    assert(!isBound());

    treeTexture.normalized = false;
    treeTexture.filterMode = hipFilterModePoint;
    treeTexture.addressMode[0] = hipAddressModeClamp;
    treeTexture.addressMode[1] = hipAddressModeClamp;
    treeTexture.addressMode[2] = hipAddressModeClamp;

    assert(treeTextureData != NULL);
    cudaSafeCall(hipBindTextureToArray(treeTexture, treeTextureData));

    setBound(true);
}
// Releases the tree texture binding. Return code intentionally unchecked
// on this teardown path (cf. ImageCache::unbind).
void TreeCache::unbind() {
    assert(isBound());
    hipUnbindTexture(treeTexture);
    setBound(false);
}
// Index of one feature response in the flat buffer. Feature-major layout:
// all samples of a feature are contiguous.
// XXX: if this is ever switched to sample-major (sample * numFeatures +
// feature), the pointer arithmetic in aggregateHistogramsKernel must be
// changed to match.
__device__
static size_t featureResponseOffset(size_t sample, size_t feature,
        size_t numSamples, size_t numFeatures) {
    const size_t featureBase = feature * numSamples;
    return featureBase + sample;
}
// Index of one histogram counter in the flat buffer, laid out row-major
// over (feature, threshold, label, value) with the binary split outcome
// ('value', 0 = left / 1 = right) as the innermost dimension.
__device__
static unsigned int counterOffset(unsigned int label, unsigned int value, unsigned int threshold, unsigned int feature,
        unsigned int numLabels, unsigned int numFeatures, unsigned int numThresholds) {

    assert(value == 0 || value == 1);
    assert(label < numLabels);
    assert(feature < numFeatures);
    assert(threshold < numThresholds);

    return ((feature * numThresholds + threshold) * numLabels + label) * 2 + value;
}
// Column index of a node inside its texture layer.
// (The 'tree' argument is unused here but kept for a uniform signature.)
__device__
int getNodeOffset(int node, int tree) {
    return node % NODES_PER_TREE_LAYER;
}

// Texture layer holding the node: each tree owns LAYERS_PER_TREE
// consecutive layers.
__device__
int getLayer(int node, int tree) {
    return tree * LAYERS_PER_TREE + node / NODES_PER_TREE_LAYER;
}
// Reads word 0 of the node record: the relative left-child offset, stored
// as raw int bits inside a float texel (-1 marks a leaf).
// Fix: use the __float_as_int bit-cast intrinsic instead of
// reinterpret_cast on a local float, which violates strict aliasing.
__device__
int getLeftNodeOffset(int node, int tree) {
    float v = tex2DLayered(treeTexture, 0, getNodeOffset(node, tree), getLayer(node, tree));
    return __float_as_int(v);
}
// Reads word 1 of the node record: the feature type (COLOR/DEPTH), stored
// as raw int bits inside a float texel.
// Fix: use the __float_as_int bit-cast intrinsic instead of
// reinterpret_cast on a local float, which violates strict aliasing.
__device__
int getType(int node, int tree) {
    float v = tex2DLayered(treeTexture, 1, getNodeOffset(node, tree), getLayer(node, tree));
    return __float_as_int(v);
}
// Words 2-4 of the node record hold packed feature parameters; each float
// texel is reinterpreted as the packed integer payload written by the
// TreeNodes setters.
// NOTE(review): reinterpret_cast of a local float technically violates
// strict aliasing — works with nvcc/hipcc in practice, but a memcpy-based
// bit cast would be safer; confirm before relying on other compilers.

// Word 2: offset1 (x, y) and region1 (x, y) as four packed int8 values.
__device__
char4 getParam1(int node, int tree) {
    float v = tex2DLayered(treeTexture, 2, getNodeOffset(node, tree), getLayer(node, tree));
    return (*reinterpret_cast<char4*>(&v));
}

// Word 3: offset2 (x, y) and region2 (x, y) as four packed int8 values.
__device__
char4 getParam2(int node, int tree) {
    float v = tex2DLayered(treeTexture, 3, getNodeOffset(node, tree), getLayer(node, tree));
    return (*reinterpret_cast<char4*>(&v));
}

// Word 4: the two uint16 channel selectors.
__device__
ushort2 getChannels(int node, int tree) {
    float v = tex2DLayered(treeTexture, 4, getNodeOffset(node, tree), getLayer(node, tree));
    return (*reinterpret_cast<ushort2*>(&v));
}
// Word 5: the split threshold (NaN for leaf nodes).
__device__
float getThreshold(int node, int tree) {
    return tex2DLayered(treeTexture, 5, getNodeOffset(node, tree), getLayer(node, tree));
}

// Words 6..6+numLabels: the per-label histogram values of the node.
__device__
float getHistogramValue(int label, int node, int tree) {
    return tex2DLayered(treeTexture, 6 + label, getNodeOffset(node, tree), getLayer(node, tree));
}
// for the unit test
__global__ void fetchTreeNodeData(
int* leftNodeOffset,
int* type,
int8_t* offset1X, int8_t* offset1Y,
int8_t* region1X, int8_t* region1Y,
int8_t* offset2X, int8_t* offset2Y,
int8_t* region2X, int8_t* region2Y,
int8_t* channel1, int8_t* channel2,
float* threshold,
float* histogram,
const int node, const int tree, const int numLabels) {
assert(threadIdx.x == 0);
assert(blockDim.y == 1);
assert(blockDim.x == 1);
*leftNodeOffset = getLeftNodeOffset(node, tree);
*type = getType(node, tree);
char4 param1 = getParam1(node, tree);
*offset1X = param1.x;
*offset1Y = param1.y;
*region1X = param1.z;
*region1Y = param1.w;
char4 param2 = getParam2(node, tree);
*offset2X = param2.x;
*offset2Y = param2.y;
*region2X = param2.z;
*region2Y = param2.w;
ushort2 channels = getChannels(node, tree);
*channel1 = channels.x;
*channel2 = channels.y;
*threshold = getThreshold(node, tree);
for (int label = 0; label < numLabels; label++) {
histogram[label] = getHistogramValue(label, node, tree);
}
}
// for the unit test
TreeNodeData getTreeNode(const int nodeNr, const boost::shared_ptr<const TreeNodes>& treeData) {
treeCache.copyTree(3, treeData.get());
const size_t nodeOffset = nodeNr - treeData->getTreeId();
const size_t numLabels = treeData->numLabels();
cuv::ndarray<int, cuv::dev_memory_space> leftNodeOffset(1);
cuv::ndarray<int, cuv::dev_memory_space> type(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset1X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset1Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region1X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region1Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset2X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset2Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region2X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region2Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> channel1(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> channel2(1);
cuv::ndarray<float, cuv::dev_memory_space> threshold(1);
cuv::ndarray<float, cuv::dev_memory_space> histogram(numLabels);
int treeNr = treeCache.getElementPos(treeData.get());
hipLaunchKernelGGL(( fetchTreeNodeData), dim3(1),dim3(1), 0, 0, leftNodeOffset.ptr(),
type.ptr(),
offset1X.ptr(), offset1Y.ptr(),
region1X.ptr(), region1Y.ptr(),
offset2X.ptr(), offset2Y.ptr(),
region2X.ptr(), region2Y.ptr(),
channel1.ptr(), channel2.ptr(),
threshold.ptr(), histogram.ptr(),
nodeOffset, treeNr, numLabels);
cudaSafeCall(hipDeviceSynchronize());
TreeNodeData data;
data.leftNodeOffset = leftNodeOffset[0];
data.type = type[0];
data.offset1X = offset1X[0];
data.offset1Y = offset1Y[0];
data.region1X = region1X[0];
data.region1Y = region1Y[0];
data.offset2X = offset2X[0];
data.offset2Y = offset2Y[0];
data.region2X = region2X[0];
data.region2Y = region2Y[0];
data.channel1 = channel1[0];
data.channel2 = channel2[0];
data.threshold = threshold[0];
data.histogram = cuv::ndarray<float, cuv::host_memory_space>(numLabels);
for (size_t label = 0; label < numLabels; label++) {
data.histogram[label] = histogram(label);
}
return data;
}
// One thread per pixel: walks the decision tree stored in the tree texture
// from the root and accumulates the leaf's label histogram into the output
// probability planes (output is laid out label-major: label, y, x).
__global__ void classifyKernel(
        float* output, int tree,
        const int16_t imageWidth, const int16_t imageHeight,
        const LabelType numLabels, bool useDepthImages) {

    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= imageWidth) {
        return;
    }
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y >= imageHeight) {
        return;
    }

    float depth;
    // depth might be nan here
    if (useDepthImages)
        depth = averageRegionDepth(0, imageWidth, imageHeight, x, x + 1, y, y + 1);
    else
        depth = 1;

    // traverse from the root until a leaf (leftNodeOffset == -1) is reached
    int currentNodeOffset = 0;
    while (true) {
        const int16_t leftNodeOffset = getLeftNodeOffset(currentNodeOffset, tree);
        assert(leftNodeOffset == -1 || leftNodeOffset > 0);
        if (leftNodeOffset < 0) {
            assert(isnan(getThreshold(currentNodeOffset, tree)));
            // accumulate (not assign): output sums histograms over all trees
            for (LabelType label = 0; label < numLabels; label++) {
                float v = getHistogramValue(label, currentNodeOffset, tree);
                assert(!isnan(v));
                assert(v >= 0.0);
                output[label * imageWidth * imageHeight + y * imageWidth + x] += v;
            }
            // leaf node
            return;
        }

        // unpack the node's feature parameters
        char4 param1 = getParam1(currentNodeOffset, tree);
        int8_t offset1X = param1.x;
        int8_t offset1Y = param1.y;
        int8_t region1X = param1.z;
        int8_t region1Y = param1.w;

        char4 param2 = getParam2(currentNodeOffset, tree);
        int8_t offset2X = param2.x;
        int8_t offset2Y = param2.y;
        int8_t region2X = param2.z;
        int8_t region2Y = param2.w;

        // NOTE(review): featureResponse stays uninitialized if the stored
        // type is neither COLOR nor DEPTH — presumably impossible for
        // well-formed trees; confirm.
        FeatureResponseType featureResponse;
        switch (getType(currentNodeOffset, tree)) {
            case COLOR: {
                ushort2 channels = getChannels(currentNodeOffset, tree);
                featureResponse = calculateColorFeature(0,
                        imageWidth, imageHeight,
                        offset1X, offset1Y,
                        offset2X, offset2Y,
                        region1X, region1Y,
                        region2X, region2Y,
                        channels.x, channels.y,
                        x, y, depth);
            }
                break;
            case DEPTH:
                // assert(false);
                featureResponse = calculateDepthFeature(0,
                        imageWidth, imageHeight,
                        offset1X, offset1Y,
                        offset2X, offset2Y,
                        region1X, region1Y,
                        region2X, region2Y,
                        x, y, depth);
                break;
        }

        float threshold = getThreshold(currentNodeOffset, tree);
        assert(!isnan(threshold));

        // descend: left child for response <= threshold, right (left+1) otherwise
        int value = static_cast<int>(!(featureResponse <= threshold));
        currentNodeOffset += leftNodeOffset + value;
    }
}
// Normalizes the per-pixel label distribution in-place so that the values
// over all label planes sum to one. Launch layout: x covers columns,
// blockIdx.y selects the row. Probabilities are stored label-major
// (label, y, x). Pixels whose sum is zero are left untouched.
__global__ void normalizeProbabilitiesKernel(float* probabilities, int numLabels, int width, int height) {
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= width) {
        return;
    }

    const unsigned int row = blockIdx.y;
    assert(row < height);

    const unsigned int pixel = row * width + col;

    // accumulate the per-pixel sum over all label planes
    float total = 0.0;
    for (int label = 0; label < numLabels; label++) {
        total += probabilities[label * width * height + pixel];
    }

    // avoid division by zero for pixels with no probability mass
    if (total == 0) {
        return;
    }

    for (int label = 0; label < numLabels; label++) {
        probabilities[label * width * height + pixel] /= total;
    }
}
// Per-pixel argmax over the label planes. Ties keep the first (lowest)
// label; an all-zero pixel yields label 0. Launch layout matches
// normalizeProbabilitiesKernel: x covers columns, blockIdx.y is the row.
__global__ void maxProbabilitiesKernel(const float* probabilities, LabelType* output, int numLabels, int width,
        int height) {
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= width) {
        return;
    }

    const unsigned int row = blockIdx.y;
    assert(row < height);

    const unsigned int pixel = row * width + col;

    float best = 0.0;
    LabelType bestLabel = 0;
    for (LabelType label = 0; label < numLabels; label++) {
        const float p = probabilities[label * width * height + pixel];
        if (p > best) {
            best = p;
            bestLabel = label;
        }
    }

    output[pixel] = bestLabel;
}
// Normalizes the per-pixel label distribution of 'probabilities' in-place
// (shape: numLabels x height x width) so each pixel's probabilities sum to
// one; all-zero pixels are left unchanged by the kernel.
// Fix: block count now uses exact integer ceil-division instead of
// ::ceil on a float quotient, and degenerate empty images are skipped
// (the old code would divide by a zero threadsPerBlock).
void normalizeProbabilities(cuv::ndarray<float, cuv::dev_memory_space>& probabilities) {

    utils::Profile profileClassifyImage("normalizeProbabilities");

    hipStream_t stream = streams[0];

    const unsigned int numLabels = probabilities.shape(0);
    const unsigned int height = probabilities.shape(1);
    const unsigned int width = probabilities.shape(2);

    if (width == 0 || height == 0) {
        return;
    }

    unsigned int threadsPerBlock = ::min(width, 128u);
    int blocks = (width + threadsPerBlock - 1) / threadsPerBlock;

    dim3 threads(threadsPerBlock);
    dim3 blockSize(blocks, height);

    hipLaunchKernelGGL(( normalizeProbabilitiesKernel), dim3(blockSize), dim3(threads), 0, stream, probabilities.ptr(), numLabels, width, height);
    cudaSafeCall(hipStreamSynchronize(stream));
}
// Computes the per-pixel argmax label of 'probabilities' (shape:
// numLabels x height x width) into 'output' (shape: height x width).
// Fix: block count now uses exact integer ceil-division instead of
// ::ceil on a float quotient, and degenerate empty images are skipped
// (the old code would divide by a zero threadsPerBlock).
void determineMaxProbabilities(const cuv::ndarray<float, cuv::dev_memory_space>& probabilities,
        cuv::ndarray<LabelType, cuv::dev_memory_space>& output) {

    utils::Profile profileClassifyImage("determineMaxProbabilities");

    const unsigned int numLabels = probabilities.shape(0);
    const unsigned int height = probabilities.shape(1);
    const unsigned int width = probabilities.shape(2);

    assert(output.shape(0) == height);
    assert(output.shape(1) == width);

    if (width == 0 || height == 0) {
        return;
    }

    hipStream_t stream = streams[0];

    unsigned int threadsPerBlock = ::min(width, 128u);
    int blocks = (width + threadsPerBlock - 1) / threadsPerBlock;

    dim3 threads(threadsPerBlock);
    dim3 blockSize(blocks, height);

    hipLaunchKernelGGL(( maxProbabilitiesKernel), dim3(blockSize), dim3(threads), 0, stream, probabilities.ptr(), output.ptr(), numLabels, width, height);
    cudaSafeCall(hipStreamSynchronize(stream));
}
// Runs one tree of the forest over a whole image: ensures image and tree
// are resident in their device caches (textures are a global resource, so
// the mutex serializes concurrent classifications) and launches
// classifyKernel, which ACCUMULATES leaf histograms into 'output'
// (numLabels x height x width) — callers sum over trees and normalize.
void classifyImage(int treeCacheSize, cuv::ndarray<float, cuv::dev_memory_space>& output, const RGBDImage& image,
        LabelType numLabels, const boost::shared_ptr<const TreeNodes>& treeData, bool useDepthImages) {

    std::set<const RGBDImage*> images;
    images.insert(&image);

    tbb::mutex::scoped_lock lock(textureMutex);

    utils::Profile profileClassifyImage("classifyImage");

    imageCache.copyImages(1, images);

    hipStream_t stream = streams[0];

    assert(output.shape(0) == numLabels);
    assert(output.shape(1) == static_cast<unsigned int>(image.getHeight()));
    assert(output.shape(2) == static_cast<unsigned int>(image.getWidth()));

    // 8x16 threads per block; grid covers the image with ceil-division
    const int threadsPerRow = 8;
    const int threadsPerColumn = 16;

    int blocksX = ::ceil(image.getWidth() / static_cast<float>(threadsPerRow));
    int blocksY = ::ceil(image.getHeight() / static_cast<float>(threadsPerColumn));

    dim3 threads(threadsPerRow, threadsPerColumn);
    dim3 blockSize(blocksX, blocksY);

    treeCache.copyTree(treeCacheSize, treeData.get());

    size_t tree = treeCache.getElementPos(treeData.get());

    utils::Profile profileClassifyImageKernel("classifyImageKernel");
    // traversal is branch-heavy: prefer L1 cache over shared memory
    cudaSafeCall(hipFuncSetCacheConfig(classifyKernel, hipFuncCachePreferL1));
    hipLaunchKernelGGL(( classifyKernel), dim3(blockSize), dim3(threads), 0, stream, output.ptr(), tree,
            image.getWidth(), image.getHeight(),
            numLabels, useDepthImages);

    cudaSafeCall(hipStreamSynchronize(stream));
}
// Computes the feature response for every (feature, sample) pair of a batch.
// Thread mapping: blockIdx.x/threadIdx.y walk the features, blockIdx.y/threadIdx.x
// walk the samples. featureResponses1 receives the response with the sample's
// flip setting applied (offsets mirrored in x when the setting is Flip);
// featureResponses2 is written ONLY when the setting is Both, and then holds the
// response of the horizontally mirrored offsets — callers may pass a null pointer
// for featureResponses2 when no sample uses Both.
__global__ void featureResponseKernel(
        FeatureResponseType* featureResponses1,
        FeatureResponseType* featureResponses2,
        const int8_t* types,
        const int16_t imageWidth, const int16_t imageHeight,
        const int8_t* offsets1X, const int8_t* offsets1Y,
        const int8_t* offsets2X, const int8_t* offsets2Y,
        const int8_t* regions1X, const int8_t* regions1Y,
        const int8_t* regions2X, const int8_t* regions2Y,
        const int8_t* channels1, const int8_t* channels2,
        const int* samplesX, const int* samplesY, const float* depths,
        const int* imageNumbers, const HorizontalFlipSetting* sampleHorFlipSetting, unsigned int numFeatures, unsigned int numSamples) {
    unsigned int feature = blockIdx.x * blockDim.y + threadIdx.y;
    unsigned int sample = blockIdx.y * blockDim.x + threadIdx.x;
    // grid may overshoot the batch: guard out-of-range threads
    if (feature >= numFeatures || sample >= numSamples) {
        return;
    }
    int8_t type = types[feature];
    assert(type == COLOR || type == DEPTH);
    int8_t offset1X = offsets1X[feature];
    int8_t offset1Y = offsets1Y[feature];
    int8_t offset2X = offsets2X[feature];
    int8_t offset2Y = offsets2Y[feature];
    int8_t region1X = regions1X[feature];
    int8_t region1Y = regions1Y[feature];
    int8_t region2X = regions2X[feature];
    int8_t region2Y = regions2Y[feature];
    int imageNr = imageNumbers[sample];
    FeatureResponseType featureResponse1;
    FeatureResponseType featureResponse2 = 0;
    HorizontalFlipSetting horFlipSetting = sampleHorFlipSetting[sample];
    // a flipped sample mirrors the x components of both offsets
    if (horFlipSetting == Flip)
    {
        offset1X = -offset1X;
        offset2X = -offset2X;
    }
    switch (type) {
        case COLOR:
        { featureResponse1 = calculateColorFeature(imageNr,
                imageWidth, imageHeight,
                offset1X, offset1Y,
                offset2X, offset2Y,
                region1X, region1Y,
                region2X, region2Y,
                channels1[feature], channels2[feature],
                samplesX[sample], samplesY[sample], depths[sample]);
        // 'Both' additionally evaluates the mirrored variant into featureResponse2
        if (horFlipSetting == Both) {
            featureResponse2 = calculateColorFeature(imageNr,
                    imageWidth, imageHeight,
                    -offset1X, offset1Y,
                    -offset2X, offset2Y,
                    region1X, region1Y,
                    region2X, region2Y,
                    channels1[feature], channels2[feature],
                    samplesX[sample], samplesY[sample], depths[sample]);}}
            break;
        case DEPTH:
        { featureResponse1 = calculateDepthFeature(imageNr,
                imageWidth, imageHeight,
                offset1X, offset1Y,
                offset2X, offset2Y,
                region1X, region1Y,
                region2X, region2Y,
                samplesX[sample], samplesY[sample], depths[sample]);
        if (horFlipSetting == Both) {
            featureResponse2 = calculateDepthFeature(imageNr,
                    imageWidth, imageHeight,
                    -offset1X, offset1Y,
                    -offset2X, offset2Y,
                    region1X, region1Y,
                    region2X, region2Y,
                    samplesX[sample], samplesY[sample], depths[sample]);}}
            break;
        default:
            // unreachable: 'type' was asserted above to be COLOR or DEPTH
            assert(false);
            break;
    }
    featureResponses1[featureResponseOffset(sample, feature, numSamples, numFeatures)] = featureResponse1;
    if (horFlipSetting == Both)
    {featureResponses2[featureResponseOffset(sample, feature, numSamples, numFeatures)] = featureResponse2;}
}
// http://stackoverflow.com/questions/600293/how-to-check-if-a-number-is-a-power-of-2
#ifndef NDEBUG
// Debug-only helper: true iff x is non-zero and has exactly one bit set.
__device__
static bool isPowerOfTwo(size_t x) {
    if (x == 0) {
        return false;
    }
    return (x & (x - 1)) == 0;
}
#endif
// Computes the normalized information gain score of every (threshold, feature)
// pair from the aggregated counters. One thread per feature along x,
// blockIdx.y selects the threshold. Relies on the counter layout produced by
// counterOffset(): the left/right (value 0/1) counters of a label are adjacent,
// and consecutive labels are 2 elements apart (verified by the asserts below).
__global__ void scoreKernel(const WeightType* counters,
        const float* thresholds,
        unsigned int numThresholds,
        unsigned int numLabels,
        unsigned int numFeatures,
        const WeightType* allClasses,
        ScoreType* scores) {
    unsigned int feature = blockIdx.x * blockDim.x + threadIdx.x;
    if (feature >= numFeatures) {
        return;
    }
    unsigned int thresh = blockIdx.y;
    // total sample weight that went left (totals[0]) and right (totals[1])
    WeightType totals[2] = { 0, 0 };
    for (unsigned int label = 0; label < numLabels; label++) {
        for (unsigned int value = 0; value < 2; value++) {
            unsigned int cidx = counterOffset(label, value, thresh, feature, numLabels, numFeatures,
                    numThresholds);
            WeightType counter = counters[cidx];
            totals[value] += counter;
        }
    }
    const WeightType* leftClasses = counters
            + counterOffset(0, 0, thresh, feature, numLabels, numFeatures, numThresholds);
    const WeightType* rightClasses = counters
            + counterOffset(0, 1, thresh, feature, numLabels, numFeatures, numThresholds);
    assert(rightClasses == leftClasses + 1);
    unsigned int leftRightStride = 2;
#ifndef NDEBUG
    // verify that per-label counters are exactly 'leftRightStride' apart
    unsigned int off0 = counterOffset(0, 0, thresh, feature, numLabels, numFeatures, numThresholds);
    unsigned int off1 = counterOffset(1, 0, thresh, feature, numLabels, numFeatures, numThresholds);
    assert(leftRightStride == off1 - off0);
#endif
    ScoreType score = NormalizedInformationGainScore::calculateScore(numLabels, leftClasses, rightClasses,
            leftRightStride, allClasses, static_cast<ScoreType>(totals[0]), static_cast<ScoreType>(totals[1]));
    // scores are stored threshold-major: (numThresholds, numFeatures)
    scores[thresh * numFeatures + feature] = score;
}
// Aggregates, for one (threshold, feature) pair per block, the per-label
// left/right histogram over all samples of the batch.
// Each thread accumulates private 16-bit counters in shared memory
// (layout: counterShared[(2 * label) * blockDim.x + 2 * tid + value]), which
// are then tree-reduced to 2 counters per label and added to the global
// counters. Requires blockDim.x to be a power of two (asserted), numLabels < 32
// (labels are tracked in a 32-bit flag word), and dynamic shared memory of
// sizeof(unsigned short) * 2 * numLabels * blockDim.x bytes.
// featureResponses2 is read only for samples whose flip setting is Both.
__global__ void aggregateHistogramsKernel(
        const FeatureResponseType* featureResponses1,
        const FeatureResponseType* featureResponses2,
        WeightType* counters,
        const float* thresholds,
        const uint8_t* sampleLabel,
        const HorizontalFlipSetting* sampleHorFlipSetting,
        unsigned int numThresholds,
        unsigned int numLabels,
        unsigned int numFeatures,
        unsigned int numSamples
        ) {
#ifndef NDEBUG
    // 16-bit shared counters must not overflow (checked in debug builds only)
    const unsigned int COUNTER_MAX = 0xFFFF;
#endif
    // shape: 2 * numLabels * threadsPerBlock
    extern __shared__ unsigned short counterShared[];
    unsigned int feature = blockIdx.y;
    unsigned int thresh = blockIdx.x;
    assert(feature < numFeatures);
    assert(thresh < numThresholds);
    unsigned int offset = thresh * numFeatures + feature;
    const float threshold = thresholds[offset];
    // initialize shared memory
    // every thread must initialize 2*numLabels counters with zero
    for (unsigned int i = threadIdx.x; i < 2 * numLabels * blockDim.x; i += blockDim.x) {
        counterShared[i] = 0;
    }
    __syncthreads();
    // bitmask of labels seen by this thread (why: lets the reduction below
    // skip labels no sample in the block carries)
    unsigned int labelFlags = 0;
    // iterate over all samples and increment the according counter in shared memory
    const FeatureResponseType* resultPtr1 = featureResponses1
            + featureResponseOffset(threadIdx.x, feature, numSamples, numFeatures);
    const FeatureResponseType* resultPtr2 = featureResponses2
            + featureResponseOffset(threadIdx.x, feature, numSamples, numFeatures);
    for (unsigned int sample = threadIdx.x; sample < numSamples; sample += blockDim.x) {
        FeatureResponseType featureResponse1 = *resultPtr1;
        resultPtr1 += blockDim.x; // need to change if featureResponseOffset calculation changes
        uint8_t label = sampleLabel[sample];
        HorizontalFlipSetting horFlipSetting = sampleHorFlipSetting[sample];
        assert(label < numLabels);
        assert(label < 32);
        labelFlags |= 1 << label;
        // value = 0 if the response goes left (<= threshold), 1 otherwise;
        // the negated form sends NaN responses to the right child
        int value = static_cast<int>(!(featureResponse1 <= threshold));
        assert(value == 0 || value == 1);
        assert(counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value] < COUNTER_MAX);
        counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value]++;
        if (horFlipSetting == Both){
            // the mirrored response of the same sample counts as a second sample
            FeatureResponseType featureResponse2 = *resultPtr2;
            value = static_cast<int>(!(featureResponse2 <= threshold));
            assert(value == 0 || value == 1);
            assert(counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value] < COUNTER_MAX);
            counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value]++;}
        resultPtr2 += blockDim.x; // need to change if featureResponseOffset calculation changes
        // no need to sync here because data is accessed only by the same thread in this loop
    }
    // no sync needed here because it is done in the loop over the labels
    assert(isPowerOfTwo(blockDim.x));
    // reduce the 2*labels*threads counters in shared memory to 2*labels counters
    for (uint8_t label = 0; label < numLabels; label++) {
        // skip labels without samples
        if (__syncthreads_or(labelFlags & (1 << label)) == 0) { //this part sometimes causes problems but it's slower without it
            if (threadIdx.x < 2) {
                counterShared[2 * label + threadIdx.x] = 0;
            }
            continue;
        }
        unsigned int idxA = (2 * label) * blockDim.x + threadIdx.x;
        for (unsigned int offset = blockDim.x; offset > 2; offset /= 2) {
            if (threadIdx.x < offset) {
                // check for counter overflow
                assert(COUNTER_MAX - counterShared[idxA] >= counterShared[idxA + offset]);
                counterShared[idxA] += counterShared[idxA + offset];
            }
            __syncthreads();
        }
        if (threadIdx.x < 2) {
            // write final result to a different (already unused) location in shared memory
            // this way, bank conflicts are avoided at the very end, when data is loaded from shared memory to write it to global memory in a coalesced manner
            counterShared[2 * label + threadIdx.x] = counterShared[idxA] + counterShared[idxA + 2];
        }
    }
    // first 2*numLabels threads publish the block's histogram to global memory
    if (threadIdx.x < 2 * numLabels) {
        const unsigned int label = threadIdx.x / 2;
        const unsigned int value = threadIdx.x % 2;
        assert(threadIdx.x == 2 * label + value);
        const unsigned short count = counterShared[threadIdx.x];
        const unsigned int cidx = counterOffset(label, value, thresh, feature, numLabels, numFeatures,
                numThresholds);
        counters[cidx] += count;
    }
}
// Switches the calling thread to the GPU assigned to this tree (round-robin
// over the configured device ids). Throws std::runtime_error if the switch
// cannot be verified.
// Fix: the address-of operator had been garbled to the HTML entity remnant
// "¤tDeviceId" (mojibake of "&currentDeviceId"), which does not compile.
void ImageFeatureEvaluation::selectDevice() {
    int currentDeviceId;
    cudaSafeCall(hipGetDevice(&currentDeviceId));
    const std::vector<int> deviceIds = configuration.getDeviceIds();
    const int targetDeviceId = deviceIds[treeId % deviceIds.size()];
    if (currentDeviceId != targetDeviceId) {
        CURFIL_DEBUG("tree " << treeId << ": switching from device " << currentDeviceId << " to " << targetDeviceId);
        cudaSafeCall(hipSetDevice(targetDeviceId));
        // re-read the current device to verify the switch actually took effect
        cudaSafeCall(hipGetDevice(&currentDeviceId));
        if (currentDeviceId != targetDeviceId) {
            throw std::runtime_error("failed to switch GPU device");
        }
    }
}
// Selects the device for this tree, logs its properties, and on first use
// creates the global CUDA streams. Stream creation is guarded by initMutex so
// only one thread performs the one-time initialization.
// Fix: the address-of operator had been garbled to the HTML entity remnant
// "¤tDeviceId" (mojibake of "&currentDeviceId"), which does not compile.
void ImageFeatureEvaluation::initDevice() {
    selectDevice();
    hipDeviceProp_t prop;
    int currentDeviceId;
    cudaSafeCall(hipGetDevice(&currentDeviceId));
    cudaSafeCall(hipGetDeviceProperties(&prop, currentDeviceId));
    CURFIL_INFO("GPU Device " << currentDeviceId << ": " << prop.name);
    {
        tbb::mutex::scoped_lock initLock(initMutex);
        if (!initialized) {
            for (int i = 0; i < NUM_STREAMS; i++) {
                cudaSafeCall(hipStreamCreate(&streams[i]));
            }
            initialized = true;
            CURFIL_DEBUG("created " << NUM_STREAMS << " streams");
        }
    }
}
// Finalizes the current batch: records per-batch statistics as timer
// annotations on the tree node, appends the batch to 'batches', and clears the
// working containers so the caller can start filling the next batch.
static void addBatch(RandomTree<PixelInstance, ImageFeatureFunction>& node,
        std::vector<std::vector<const PixelInstance*> >& batches,
        std::vector<const PixelInstance*>& currentBatch,
        std::set<const RGBDImage*>& imagesInCurrentBatch) {
    assert(!currentBatch.empty());
    const unsigned int batchNr = batches.size();
    // collect the distinct labels contained in this batch
    std::set<LabelType> labels;
    for (size_t sampleNr = 0; sampleNr < currentBatch.size(); sampleNr++) {
        labels.insert(currentBatch[sampleNr]->getLabel());
    }
    node.setTimerAnnotation((boost::format("batch%d.numSamples") % batchNr).str(), currentBatch.size());
    node.setTimerAnnotation((boost::format("batch%d.numImages") % batchNr).str(), imagesInCurrentBatch.size());
    node.setTimerAnnotation((boost::format("batch%d.numLabels") % batchNr).str(), labels.size());
    CURFIL_DEBUG((boost::format("batch%d.numSamples: %d") % batchNr % currentBatch.size()).str());
    CURFIL_DEBUG((boost::format("batch%d.numImages: %d") % batchNr % imagesInCurrentBatch.size()).str());
    batches.push_back(currentBatch);
    // reset the working state for the next batch
    currentBatch.clear();
    imagesInCurrentBatch.clear();
}
// Partitions the host samples into batches that respect the device image cache
// capacity and the configured maximal batch size. Samples whose images are
// already cached are placed first to reduce texture uploads.
// Locks textureMutex manually; when keepMutexLocked is true the lock is
// intentionally kept and must be released by the caller.
std::vector<std::vector<const PixelInstance*> > ImageFeatureEvaluation::prepare(
        const std::vector<const PixelInstance*>& hostSamples,
        RandomTree<PixelInstance, ImageFeatureFunction>& node, cuv::dev_memory_space, bool keepMutexLocked) {
    selectDevice();
    assert(hostSamples.size() > 0);
    textureMutex.lock();
    utils::Timer prepareTime;
    std::vector<const PixelInstance*> samples;
    // take samples with cached images first. then the uncached images
    for (size_t sample = 0; sample < hostSamples.size(); sample++) {
        if (imageCache.containsElement(hostSamples[sample]->getRGBDImage())) {
            samples.push_back(hostSamples[sample]);
        }
    }
    for (size_t sample = 0; sample < hostSamples.size(); sample++) {
        if (!imageCache.containsElement(hostSamples[sample]->getRGBDImage())) {
            samples.push_back(hostSamples[sample]);
        }
    }
    assert(samples.size() == hostSamples.size());
    std::vector<std::vector<const PixelInstance*> > batches;
    std::vector<const PixelInstance*> currentBatch;
    std::set<const RGBDImage*> imagesInCurrentBatch;
    for (size_t sampleNr = 0; sampleNr < samples.size(); sampleNr++) {
        const PixelInstance* sample = samples[sampleNr];
        assert(sample->getDepth().isValid());
        // close the current batch when adding this sample would require a new
        // image beyond the cache capacity, or when the batch is full
        if ((imagesInCurrentBatch.find(sample->getRGBDImage()) == imagesInCurrentBatch.end()
                && imagesInCurrentBatch.size() >= static_cast<size_t>(configuration.getImageCacheSize()))
                || currentBatch.size() == configuration.getMaxSamplesPerBatch()) {
            addBatch(node, batches, currentBatch, imagesInCurrentBatch);
        }
        imagesInCurrentBatch.insert(sample->getRGBDImage());
        currentBatch.push_back(sample);
    }
    if (!currentBatch.empty()) {
        addBatch(node, batches, currentBatch, imagesInCurrentBatch);
    }
    assert(!batches.empty());
    // NOTE(review): image dimensions are taken from the first sample only —
    // presumably all images share the same size; confirm against the callers
    imageWidth = batches[0][0]->width();
    imageHeight = batches[0][0]->height();
    node.setTimerValue("prepareBatches", prepareTime);
    if (!keepMutexLocked) {
        textureMutex.unlock();
    }
    return batches;
}
// Reorders the features (and their per-threshold values) on the device
// according to the sort keys in row 0 of keysIndices, using row 1 as the
// permutation payload for thrust::gather.
// NOTE(review): the keys are produced by generateRandomFeaturesKernel;
// sorting presumably groups similar features for better cache locality in the
// feature response kernel — confirm against the kernel's key computation.
template<>
void ImageFeatureEvaluation::sortFeatures(
        ImageFeaturesAndThresholds<cuv::dev_memory_space>& featuresAndThresholds,
        const cuv::ndarray<int, cuv::dev_memory_space>& keysIndices) const {
    utils::Profile profile("sortFeatures");
    unsigned int numFeatures = configuration.getFeatureCount();
    ImageFeaturesAndThresholds<cuv::dev_memory_space> sortedFeaturesAndThresholds(numFeatures,
            configuration.getThresholds(), featuresAllocator);
    // k: sort keys, i: indices that end up holding the sort permutation
    thrust::device_ptr<int> k(keysIndices[cuv::indices[0][cuv::index_range()]].ptr());
    thrust::device_ptr<int> i(keysIndices[cuv::indices[1][cuv::index_range()]].ptr());
    thrust::sort_by_key(k, k + numFeatures, i);
    cuv::ndarray<int8_t, cuv::dev_memory_space> features = featuresAndThresholds.features();
    cuv::ndarray<int8_t, cuv::dev_memory_space> sortedFeatures = sortedFeaturesAndThresholds.features();
    assert(features.shape() == sortedFeatures.shape());
    // 11 rows: type, offsets (4), regions (4), channels (2)
    const size_t dim = features.shape(0);
    assert(dim == 11);
    // permute every feature attribute row with the sorted index order
    for (size_t d = 0; d < dim; d++) {
        thrust::device_ptr<int8_t> ptr(features[cuv::indices[d][cuv::index_range()]].ptr());
        thrust::device_ptr<int8_t> sortedPtr(sortedFeatures[cuv::indices[d][cuv::index_range()]].ptr());
        thrust::gather(i, i + numFeatures, ptr, sortedPtr);
    }
    // permute the per-threshold values with the same index order
    for (size_t thresh = 0; thresh < configuration.getThresholds(); thresh++) {
        thrust::device_ptr<float> thresholdsPtr(
                featuresAndThresholds.thresholds()[cuv::indices[thresh][cuv::index_range()]].ptr());
        thrust::device_ptr<float> sortedThresholdsPtr(
                sortedFeaturesAndThresholds.thresholds()[cuv::indices[thresh][cuv::index_range()]].ptr());
        thrust::gather(i, i + numFeatures, thresholdsPtr, sortedThresholdsPtr);
    }
    featuresAndThresholds = sortedFeaturesAndThresholds;
}
// Generates the random feature candidates (and candidate thresholds sampled
// from the batch) directly on the device, optionally sorting them afterwards.
// Holds textureMutex while the samples are resident on the device.
ImageFeaturesAndThresholds<cuv::dev_memory_space> ImageFeatureEvaluation::generateRandomFeatures(
        const std::vector<const PixelInstance*>& samples, int seed, const bool sort, cuv::dev_memory_space) {
    unsigned int numFeatures = configuration.getFeatureCount();
    unsigned int numThresholds = configuration.getThresholds();
    tbb::mutex::scoped_lock textureLock(textureMutex);
    Samples<cuv::dev_memory_space> samplesOnDevice = copySamplesToDevice(samples, streams[0]);
    ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(numFeatures, numThresholds,
            featuresAllocator);
    // row 0: sort keys, row 1: feature indices (consumed by sortFeatures)
    cuv::ndarray<int, cuv::dev_memory_space> keysIndices(2, numFeatures, keysIndicesAllocator);
    // one thread per feature
    int threadsPerBlock = ::min(numFeatures, 128u);
    int blocks = ::ceil(numFeatures / static_cast<float>(threadsPerBlock));
    const size_t numSamples = samplesOnDevice.data.shape(1);
    assert(numSamples == samples.size());
    {
        // kernel does not use shared memory, so favor a larger L1 cache
        cudaSafeCall(hipFuncSetCacheConfig(generateRandomFeaturesKernel, hipFuncCachePreferL1));
        utils::Profile profile("generateRandomFeatures");
        hipLaunchKernelGGL(( generateRandomFeaturesKernel), dim3(blocks), dim3(threadsPerBlock), 0, streams[0], seed,
                numFeatures,
                keysIndices[cuv::indices[0][cuv::index_range()]].ptr(),
                keysIndices[cuv::indices[1][cuv::index_range()]].ptr(),
                configuration.getBoxRadius(), configuration.getRegionSize(),
                featuresAndThresholds.types().ptr(),
                featuresAndThresholds.offset1X().ptr(), featuresAndThresholds.offset1Y().ptr(),
                featuresAndThresholds.region1X().ptr(), featuresAndThresholds.region1Y().ptr(),
                featuresAndThresholds.offset2X().ptr(), featuresAndThresholds.offset2Y().ptr(),
                featuresAndThresholds.region2X().ptr(), featuresAndThresholds.region2Y().ptr(),
                featuresAndThresholds.channel1().ptr(), featuresAndThresholds.channel2().ptr(),
                featuresAndThresholds.thresholds().ptr(),
                numThresholds,
                numSamples,
                imageWidth, imageHeight,
                samplesOnDevice.imageNumbers,
                samplesOnDevice.depths,
                samplesOnDevice.sampleX,
                samplesOnDevice.sampleY,
                samplesOnDevice.labels,
                configuration.isUseDepthImages()
                );
        // synchronize early only when profiling, to get accurate timings
        if (profile.isEnabled()) {
            cudaSafeCall(hipStreamSynchronize(streams[0]));
        }
    }
    if (sort) {
        sortFeatures(featuresAndThresholds, keysIndices);
    }
    cudaSafeCall(hipStreamSynchronize(streams[0]));
    return featuresAndThresholds;
}
// For every batch: uploads the samples, computes all feature responses on
// stream 0, then aggregates the per-label/left-right histograms on stream 1.
// Returns the counters with shape (numFeatures, numThresholds, numLabels, 2).
// When featureResponsesHost is non-null, the (unflipped) responses of all
// batches are additionally collected on the host.
// Locking protocol: prepare() left textureMutex locked for the first batch;
// it is re-acquired per batch and released once the responses are computed.
template<>
cuv::ndarray<WeightType, cuv::dev_memory_space> ImageFeatureEvaluation::calculateFeatureResponsesAndHistograms(
        RandomTree<PixelInstance, ImageFeatureFunction>& node,
        const std::vector<std::vector<const PixelInstance*> >& batches,
        const ImageFeaturesAndThresholds<cuv::dev_memory_space>& featuresAndThresholds,
        cuv::ndarray<FeatureResponseType, cuv::host_memory_space>* featureResponsesHost) {
    unsigned int numFeatures = configuration.getFeatureCount();
    unsigned int numThresholds = configuration.getThresholds();
    const size_t numLabels = node.getNumClasses();
#ifndef NDEBUG
    // debug check: no sample may carry a label outside [0, numLabels)
    {
        size_t numLabelsCheck = 0;
        for (size_t batch = 0; batch < batches.size(); batch++) {
            for (size_t sample = 0; sample < batches[batch].size(); sample++) {
                numLabelsCheck = ::max(numLabelsCheck,
                        static_cast<size_t>(batches[batch][sample]->getLabel() + 1));
            }
        }
        if (numLabelsCheck > numLabels) {
            CURFIL_DEBUG("numLabelsCheck: " << numLabelsCheck);
            CURFIL_DEBUG("numLabels: " << numLabels);
            assert(false);
        }
    }
#endif
    // see function counterOffset()
    // features  threshold  labels  2
    std::vector<unsigned int> shape;
    shape.push_back(numFeatures);
    shape.push_back(numThresholds);
    shape.push_back(numLabels);
    shape.push_back(2);
    cuv::ndarray<WeightType, cuv::dev_memory_space> counters(shape, countersAllocator);
    cudaSafeCall(hipMemsetAsync(counters.ptr(), 0,
            static_cast<size_t>(counters.size() * sizeof(WeightType)), streams[0]));
    assert(numFeatures == configuration.getFeatureCount());
    cuv::ndarray<FeatureResponseType, cuv::dev_memory_space> featureResponsesDevice1(numFeatures,
            configuration.getMaxSamplesPerBatch(), featureResponsesAllocator);
    cuv::ndarray<FeatureResponseType, cuv::dev_memory_space> featureResponsesDevice2(featureResponsesAllocator);
    if (featureResponsesHost) {
        size_t totalSamples = 0;
        for (size_t batch = 0; batch < batches.size(); batch++) {
            totalSamples += batches[batch].size();
        }
        featureResponsesHost->resize(numFeatures, totalSamples);
    }
    // stays null unless horizontal flipping is enabled (kernels check the
    // per-sample flip setting before dereferencing it)
    FeatureResponseType* featureResponses2ptr;
    featureResponses2ptr = 0;
    size_t samplesProcessed = 0;
    {
        for (size_t batch = 0; batch < batches.size(); batch++) {
            const std::vector<const PixelInstance*>& currentBatch = batches[batch];
            unsigned int batchSize = currentBatch.size();
            // textureMutex is already locked for batch 0 (by prepare())
            if (batch > 0) {
                textureMutex.lock();
            }
            Samples<cuv::dev_memory_space> sampleData = copySamplesToDevice(currentBatch, streams[0]);
            featureResponsesDevice1.resize(numFeatures, batchSize);
            if (configuration.doHorizontalFlipping())
            {   featureResponsesDevice2.resize(numFeatures, batchSize);
                featureResponses2ptr = featureResponsesDevice2.ptr();
            }
            // 2D launch: features x samples
            unsigned int featuresPerBlock = ::min(numFeatures, 32u);
            unsigned int samplesPerBlock = ::min(batchSize, 4u);
            int featureBlocks = ::ceil(numFeatures / static_cast<float>(featuresPerBlock));
            int sampleBlocks = ::ceil(batchSize / static_cast<float>(samplesPerBlock));
            dim3 blockSize(featureBlocks, sampleBlocks);
            dim3 threads(samplesPerBlock, featuresPerBlock);
            CURFIL_DEBUG("feature response kernel: launching " << blockSize.x << "x" <<blockSize.y
                    << " blocks with " << threads.x << "x" << threads.y << " threads");
            cudaSafeCall(hipStreamSynchronize(streams[0]));
            utils::Timer featureResponseTimer;
            {
                cudaSafeCall(hipFuncSetCacheConfig(featureResponseKernel, hipFuncCachePreferL1));
                utils::Profile profile("calculate feature responses");
                hipLaunchKernelGGL(( featureResponseKernel), dim3(blockSize), dim3(threads), 0, streams[0], 
                        featureResponsesDevice1.ptr(),
                        featureResponses2ptr,
                        featuresAndThresholds.types().ptr(),
                        imageWidth, imageHeight,
                        featuresAndThresholds.offset1X().ptr(), featuresAndThresholds.offset1Y().ptr(),
                        featuresAndThresholds.offset2X().ptr(), featuresAndThresholds.offset2Y().ptr(),
                        featuresAndThresholds.region1X().ptr(), featuresAndThresholds.region1Y().ptr(),
                        featuresAndThresholds.region2X().ptr(), featuresAndThresholds.region2Y().ptr(),
                        featuresAndThresholds.channel1().ptr(), featuresAndThresholds.channel2().ptr(),
                        sampleData.sampleX, sampleData.sampleY, sampleData.depths, sampleData.imageNumbers, sampleData.horFlipSetting,
                        numFeatures,
                        batchSize
                        );
                if (profile.isEnabled()) {
                    cudaSafeCall(hipStreamSynchronize(streams[0]));
                }
            }
            cudaSafeCall(hipStreamSynchronize(streams[0]));
            //please note that featureResponsesDevice2 was not added
            if (featureResponsesHost) {
                // append feature responses on device to the feature responses for our caller
                (*featureResponsesHost)[cuv::indices[cuv::index_range()][cuv::index_range(samplesProcessed,
                        samplesProcessed + batchSize)]] = featureResponsesDevice1;
            }
            node.addTimerValue("featureResponse", featureResponseTimer);
            node.setTimerValue((boost::format("batch%d.featureResponse") % batch).str(), featureResponseTimer);
            // responses are computed: the texture caches may be reused by others
            textureMutex.unlock();
            utils::Timer aggregateHistogramsTimer;
            assert(numLabels > 0);
            {
                // smaller blocks for small batches to keep more blocks in flight
                int threadsPerBlock = 128;
                if (batchSize <= 3000) {
                    threadsPerBlock = 64;
                }
                dim3 blockSize(numThresholds, numFeatures);
                dim3 threads(threadsPerBlock);
                utils::Profile profile((boost::format("aggregate histograms (%d samples)") % batchSize).str());
                // dynamic shared memory for the per-thread 16-bit counters
                unsigned int sharedMemory = sizeof(unsigned short) * 2 * numLabels * threadsPerBlock;
                cudaSafeCall(hipFuncSetCacheConfig(aggregateHistogramsKernel, hipFuncCachePreferShared));
                hipLaunchKernelGGL(( aggregateHistogramsKernel), dim3(blockSize), dim3(threads), sharedMemory, streams[1], 
                        featureResponsesDevice1.ptr(),
                        featureResponses2ptr,
                        counters.ptr(),
                        featuresAndThresholds.thresholds().ptr(),
                        sampleData.labels,
                        sampleData.horFlipSetting,
                        numThresholds,
                        numLabels,
                        numFeatures,
                        batchSize
                        );
                if (profile.isEnabled()) {
                    cudaSafeCall(hipStreamSynchronize(streams[1]));
                }
            }
            cudaSafeCall(hipStreamSynchronize(streams[1]));
            node.addTimerValue("aggregateHistograms", aggregateHistogramsTimer);
            node.setTimerValue((boost::format("batch%d.aggregateHistograms") % batch).str(),
                    aggregateHistogramsTimer);
            samplesProcessed += batchSize;
        }
    }
    return counters;
}
// Computes the split score for every (threshold, feature) pair on the device
// and returns the result copied to host memory.
template<>
cuv::ndarray<ScoreType, cuv::host_memory_space> ImageFeatureEvaluation::calculateScores(
        const cuv::ndarray<WeightType, cuv::dev_memory_space>& counters,
        const ImageFeaturesAndThresholds<cuv::dev_memory_space>& featuresAndThresholds,
        const cuv::ndarray<WeightType, cuv::dev_memory_space>& histogram) {
    const unsigned int numFeatures = configuration.getFeatureCount();
    const unsigned int numThresholds = configuration.getThresholds();
    cuv::ndarray<ScoreType, cuv::dev_memory_space> scores(numThresholds, numFeatures, scoresAllocator);
    // the histogram holds one weight per label
    const size_t numLabels = histogram.size();
    assert(counters.shape(2) == numLabels);
    assert(numLabels > 0);
    {
        // one thread per feature; blockIdx.y selects the threshold
        int threadsPerBlock = ::min(numFeatures, 128u);
        int blocks = ::ceil(numFeatures / static_cast<float>(threadsPerBlock));
        dim3 threads(threadsPerBlock);
        dim3 blockSize(blocks, numThresholds);
        utils::Profile profile("score kernel");
        cudaSafeCall(hipFuncSetCacheConfig(scoreKernel, hipFuncCachePreferL1));
        hipLaunchKernelGGL(( scoreKernel), dim3(blockSize), dim3(threads), 0, streams[1], 
                counters.ptr(),
                featuresAndThresholds.thresholds().ptr(),
                numThresholds,
                numLabels,
                numFeatures,
                histogram.ptr(),
                scores.ptr()
        );
        if (profile.isEnabled()) {
            cudaSafeCall(hipStreamSynchronize(streams[1]));
        }
    }
    // asynchronous device-to-host copy followed by an explicit synchronize
    cuv::ndarray<ScoreType, cuv::host_memory_space> scoresCPU(scores, streams[1]);
    cudaSafeCall(hipStreamSynchronize(streams[1]));
    return scoresCPU;
}
// Converts a trained RandomTreeImage into the flat TreeNodes layout that the
// GPU classification kernels consume.
boost::shared_ptr<const TreeNodes> convertTree(
        const boost::shared_ptr<const RandomTreeImage>& randomTreeImage) {
    const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >& tree =
            randomTreeImage->getTree();
    utils::Profile profile("convertTree");
    return boost::make_shared<const TreeNodes>(TreeNodes(tree));
}
}
| 53ae9df12c362581f8c76d0e429e9b5b926320c1.cu | #if 0
#######################################################################################
# The MIT License
# Copyright (c) 2014 Hannes Schulz, University of Bonn <schulz@ais.uni-bonn.de>
# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <mail@bwaldvogel.de>
# Copyright (c) 2008-2009 Sebastian Nowozin <nowozin@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######################################################################################
#endif
#include "random_tree_image_gpu.h"
#include <boost/format.hpp>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#include <set>
#include <tbb/mutex.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "random_tree_image.h"
#include "score.h"
#include "utils.h"
namespace curfil {

// Layered device textures caching the color images, depth images and the
// flattened tree data.
// must be global
texture<float, cudaTextureType2DLayered, cudaReadModeElementType> colorTexture;
texture<int, cudaTextureType2DLayered, cudaReadModeElementType> depthTexture;
texture<float, cudaTextureType2DLayered, cudaReadModeElementType> treeTexture;

// guards the one-time creation of the CUDA streams below
tbb::mutex initMutex;
volatile bool initialized = false;

// serializes access to the image/tree texture caches
tbb::mutex textureMutex;

const static int NUM_STREAMS = 2;
cudaStream_t streams[NUM_STREAMS] = { NULL, NULL };

ImageCache imageCache;
TreeCache treeCache;
// Fetches one color channel of pixel (x, y) of the cached image 'imageNr'
// from the layered color texture.
__device__
float getColorChannelValue(int x, int y, int imageNr, int channel) {
    assert(channel < colorChannels);
    const int layer = imageNr * colorChannels + channel;
    return tex2DLayered(colorTexture, x, y, layer);
}
// Fetches the depth value of pixel (x, y) of the cached image 'imageNr'.
__device__
int getDepthValue(int x, int y, int imageNr) {
    const int layer = imageNr * depthChannels + depthChannel;
    return tex2DLayered(depthTexture, x, y, layer);
}
// Fetches the depth-validity value of pixel (x, y) of the cached image 'imageNr'.
__device__
int getDepthValidValue(int x, int y, int imageNr) {
    const int layer = imageNr * depthChannels + depthValidChannel;
    return tex2DLayered(depthTexture, x, y, layer);
}
// Average depth over the rectangle [leftX, rightX] x [upperY, lowerY].
// NOTE(review): the four-corner arithmetic matches a summed-area (integral
// image) box evaluation, so the depth texture presumably stores column-wise
// prefix sums — confirm against the image upload code.
// Returns NaN when the rectangle leaves the image or contains no valid pixel.
__device__
FeatureResponseType averageRegionDepth(int imageNr,
        const int16_t imageWidth, const int16_t imageHeight,
        int leftX, int rightX, int upperY, int lowerY) {
    if (leftX < 0 || rightX >= imageWidth || upperY < 0 || lowerY >= imageHeight) {
        return nan("");
    }
    int upperLeftValid = getDepthValidValue(leftX, upperY, imageNr);
    int upperRightValid = getDepthValidValue(rightX, upperY, imageNr);
    int lowerRightValid = getDepthValidValue(rightX, lowerY, imageNr);
    int lowerLeftValid = getDepthValidValue(leftX, lowerY, imageNr);
    // number of valid depth pixels inside the rectangle
    int numValid = (lowerRightValid - upperRightValid) + (upperLeftValid - lowerLeftValid);
    assert(numValid >= 0 && numValid <= (rightX - leftX) * (lowerY - upperY));
    if (numValid == 0) {
        return nan("");
    }
    int upperLeftDepth = getDepthValue(leftX, upperY, imageNr);
    int upperRightDepth = getDepthValue(rightX, upperY, imageNr);
    int lowerRightDepth = getDepthValue(rightX, lowerY, imageNr);
    int lowerLeftDepth = getDepthValue(leftX, lowerY, imageNr);
    int sum = (lowerRightDepth - upperRightDepth) + (upperLeftDepth - lowerLeftDepth);
    // depth values are stored in millimeters; convert to meters before averaging
    FeatureResponseType feat = sum / static_cast<FeatureResponseType>(1000);
    return (feat / numValid);
}
// Depth-normalized variant: scales both the offset of the region center and
// the region extent by 1/depth, then delegates to the corner-based overload.
__device__
FeatureResponseType averageRegionDepth(int imageNr,
        const int16_t imageWidth, const int16_t imageHeight,
        float depth,
        int sampleX, int sampleY,
        int offsetX, int offsetY,
        int regionWidth, int regionHeight) {
    // region half-extent is at least one pixel in each direction
    const int halfWidth = max(1, static_cast<int>(regionWidth / depth));
    const int halfHeight = max(1, static_cast<int>(regionHeight / depth));
    const int centerX = sampleX + static_cast<int>(offsetX / depth);
    const int centerY = sampleY + static_cast<int>(offsetY / depth);
    return averageRegionDepth(imageNr, imageWidth, imageHeight,
            centerX - halfWidth, centerX + halfWidth,
            centerY - halfHeight, centerY + halfHeight);
}
// Region sum of one color channel over a depth-normalized rectangle around
// (sampleX, sampleY). The four-corner arithmetic matches a summed-area
// evaluation of the color texture.
// NOTE(review): unlike averageRegionDepth, the sum is NOT divided by the
// region area — presumably normalization is unnecessary for the score; confirm.
// Returns NaN when the rectangle leaves the image or any corner value is NaN.
__device__
FeatureResponseType averageRegionColor(int imageNr,
        uint16_t imageWidth, uint16_t imageHeight,
        int channel, float depth,
        int sampleX, int sampleY,
        int offsetX, int offsetY,
        int regionWidth, int regionHeight) {
    // region half-extent is at least one pixel in each direction
    int width = max(1, static_cast<int>(regionWidth / depth));
    int height = max(1, static_cast<int>(regionHeight / depth));
    int x = sampleX + static_cast<int>(offsetX / depth);
    int y = sampleY + static_cast<int>(offsetY / depth);
    int leftX = x - width;
    int rightX = x + width;
    int upperY = y - height;
    int lowerY = y + height;
    if (leftX < 0 || rightX >= imageWidth || upperY < 0 || lowerY >= imageHeight) {
        return nan("");
    }
    FeatureResponseType upperLeftPixel = getColorChannelValue(leftX, upperY, imageNr, channel);
    FeatureResponseType upperRightPixel = getColorChannelValue(rightX, upperY, imageNr, channel);
    FeatureResponseType lowerRightPixel = getColorChannelValue(rightX, lowerY, imageNr, channel);
    FeatureResponseType lowerLeftPixel = getColorChannelValue(leftX, lowerY, imageNr, channel);
    if (isnan(lowerRightPixel) || isnan(lowerLeftPixel) || isnan(upperRightPixel) || isnan(upperLeftPixel))
        return nan("");
    FeatureResponseType sum = (lowerRightPixel - upperRightPixel) + (upperLeftPixel - lowerLeftPixel);
    return sum;
}
// Feature response = difference of two depth-normalized depth-region averages.
// Propagates NaN as soon as either region is invalid (outside the image or
// without valid depth pixels).
__device__
FeatureResponseType calculateDepthFeature(int imageNr,
        int16_t imageWidth, int16_t imageHeight,
        int8_t offset1X, int8_t offset1Y,
        int8_t offset2X, int8_t offset2Y,
        int8_t region1X, int8_t region1Y,
        int8_t region2X, int8_t region2Y,
        int sampleX, int sampleY, float depth) {
    const FeatureResponseType regionA = averageRegionDepth(imageNr, imageWidth, imageHeight, depth,
            sampleX, sampleY, offset1X, offset1Y, region1X, region1Y);
    if (isnan(regionA)) {
        return regionA;
    }
    const FeatureResponseType regionB = averageRegionDepth(imageNr, imageWidth, imageHeight, depth,
            sampleX, sampleY, offset2X, offset2Y, region2X, region2Y);
    if (isnan(regionB)) {
        return regionB;
    }
    return regionA - regionB;
}
// Feature response = difference of two color-region sums (possibly of
// different channels). Propagates NaN as soon as either region is invalid.
__device__
FeatureResponseType calculateColorFeature(int imageNr,
        const int16_t imageWidth, const int16_t imageHeight,
        int8_t offset1X, int8_t offset1Y,
        int8_t offset2X, int8_t offset2Y,
        int8_t region1X, int8_t region1Y,
        int8_t region2X, int8_t region2Y,
        int8_t channel1, int8_t channel2,
        int sampleX, int sampleY, float depth) {
    assert(channel1 >= 0 && channel1 < 3);
    assert(channel2 >= 0 && channel2 < 3);
    const FeatureResponseType regionA = averageRegionColor(imageNr, imageWidth, imageHeight, channel1, depth,
            sampleX, sampleY, offset1X, offset1Y, region1X, region1Y);
    if (isnan(regionA)) {
        return regionA;
    }
    const FeatureResponseType regionB = averageRegionColor(imageNr, imageWidth, imageHeight, channel2, depth,
            sampleX, sampleY, offset2X, offset2Y, region2X, region2Y);
    if (isnan(regionB)) {
        return regionB;
    }
    return regionA - regionB;
}
// Initializes one curand state per feature.
__global__
void setupRandomStatesKernel(unsigned long long seed, curandState* state, unsigned int numFeatures) {
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= numFeatures) {
        return;
    }
    /* Each thread gets same seed, a different sequence number, no offset */
    curand_init(seed, id, 0, &state[id]);
}
// Draws a random 2D offset within the box [-boxRadius, boxRadius].
// NOTE(review): curand_uniform returns values in (0.0, 1.0], and boxRadius is
// radius + 1, so after truncation the offsets lie in [-radius, radius + 1] —
// the asserts only bound them by boxRadius. Confirm the extra +1 is intended.
__device__
void randomOffset(curandState* state, int8_t* x, int8_t* y, const uint8_t radius) {
    const uint8_t boxRadius = radius + 1;
    const int8_t vx = curand_uniform(state) * 2 * boxRadius - boxRadius;
    const int8_t vy = curand_uniform(state) * 2 * boxRadius - boxRadius;
    assert(vx >= -boxRadius && vx <= boxRadius);
    assert(vy >= -boxRadius && vy <= boxRadius);
    *x = vx;
    *y = vy;
}
// Draws a random region extent in [1, regionSize] for each axis.
// Fix: curand_uniform returns values in (0.0, 1.0] — 1.0 inclusive — so the
// raw draw curand_uniform(state) * regionSize + 1 can evaluate to
// regionSize + 1, violating this function's own asserts and the documented
// range. Clamp the (rare) boundary draw back to regionSize.
__device__
void randomRegion(curandState* state, int8_t* x, int8_t* y, const uint8_t regionSize) {
    int8_t vx = curand_uniform(state) * regionSize + 1;
    int8_t vy = curand_uniform(state) * regionSize + 1;
    if (vx > regionSize) {
        vx = regionSize;
    }
    if (vy > regionSize) {
        vy = regionSize;
    }
    assert(vx >= 1 && vx <= regionSize);
    assert(vy >= 1 && vy <= regionSize);
    *x = vx;
    *y = vy;
}
// Generates one random split-feature candidate per thread and evaluates it
// on randomly chosen training samples to obtain candidate thresholds.
//
// Per feature this writes: the feature type, the two probe offsets/regions/
// channels, a non-negative radix-sort key, the feature index, and
// numThresholds feature responses into 'thresholds'.
__global__
void generateRandomFeaturesKernel(int seed,
unsigned int numFeatures,
int* keys, int* indices,
uint16_t boxRadius,
uint16_t regionSize,
int8_t* types,
int8_t* offsets1X, int8_t* offsets1Y,
int8_t* regions1X, int8_t* regions1Y,
int8_t* offsets2X, int8_t* offsets2Y,
int8_t* regions2X, int8_t* regions2Y,
int8_t* channels1, int8_t* channels2,
float* thresholds,
unsigned int numThresholds,
unsigned int numSamples,
int imageWidth, int imageHeight,
int* imageNumbers,
float* depths,
int* sampleX,
int* sampleY,
uint8_t* sampleLabel,
bool isUseDepthImages) {
int feat = blockIdx.x * blockDim.x + threadIdx.x;
if (feat >= numFeatures) {
return;
}
// Per-thread RNG: shared seed, sequence number = feature index.
curandState localState;
curand_init(seed, feat, 0, &localState);
uint8_t type;
int8_t offset1X, offset1Y;
int8_t offset2X, offset2Y;
int8_t region1X, region1Y;
int8_t region2X, region2Y;
uint8_t channel1, channel2;
// With depth images the first half of the features are COLOR and the
// second half DEPTH; otherwise every feature is COLOR.
if (isUseDepthImages)
type = static_cast<uint8_t>(feat >= numFeatures / 2);
else
type = COLOR;
types[feat] = type;
randomOffset(&localState, &offset1X, &offset1Y, boxRadius);
randomRegion(&localState, &region1X, &region1Y, regionSize);
// Re-draw the second probe until its offset differs from the first one.
do {
randomOffset(&localState, &offset2X, &offset2Y, boxRadius);
randomRegion(&localState, &region2X, &region2Y, regionSize);
} while (offset1X == offset2X && offset1Y == offset2Y);
if (type == COLOR) {
// Channels are derived deterministically from the feature index (rather
// than drawn from the RNG; see the commented-out draws below) so that
// features sort into contiguous channel groups.
// chan1=MOD(INT(A2/(100/2)*3);3)
// chan2=MOD(INT(A2/(100/2/3)*3);3)
if (isUseDepthImages) {
channel1 = feat / (numFeatures / 2.0) * 3;
channel1 %= 3;
channel2 = feat / (numFeatures / 2.0 / 3) * 3;
channel2 %= 3;
}
else
{
channel1 = feat / (numFeatures / 1.0 ) * 3;
channel1 %= 3;
channel2 = feat / (numFeatures / 1.0 / 3) * 3;
channel2 %= 3;
}
// channel1 = curand_uniform(&localState) * 3;
// channel2 = curand_uniform(&localState) * 3;
} else {
channel1 = 0;
channel2 = 0;
}
// Each candidate threshold is the feature response on a randomly picked
// sample; NaN responses (invalid probes) are mapped to 0.
for (unsigned int thresh = 0; thresh < numThresholds; thresh++) {
unsigned int numSample = curand_uniform(&localState) * (numSamples - 1);
FeatureResponseType featureResponse;
switch (type) {
case COLOR:
featureResponse = calculateColorFeature(imageNumbers[numSample],
imageWidth, imageHeight,
offset1X, offset1Y,
offset2X, offset2Y,
region1X, region1Y,
region2X, region2Y,
channel1, channel2,
sampleX[numSample], sampleY[numSample], depths[numSample]);
break;
case DEPTH:
featureResponse = calculateDepthFeature(imageNumbers[numSample],
imageWidth, imageHeight,
offset1X, offset1Y,
offset2X, offset2Y,
region1X, region1Y,
region2X, region2Y,
sampleX[numSample], sampleY[numSample], depths[numSample]);
break;
default:
assert(false);
break;
}
if (isnan(featureResponse)) {
featureResponse = 0.0;
}
// Feature-major layout: all features for one threshold are contiguous.
thresholds[thresh * numFeatures + feat] = featureResponse;
}
// Pack type, channels and the first offset into a non-negative sort key so
// that similar features end up adjacent after sorting.
int32_t sortKey = 0;
sortKey |= static_cast<uint8_t>(type & 0x03) << 30; // 2 bit for the type
sortKey |= static_cast<uint8_t>(channel1 & 0x0F) << 26; // 4 bit for channel1
sortKey |= static_cast<uint8_t>(channel2 & 0x0F) << 22; // 4 bit for channel2
sortKey |= static_cast<uint8_t>((offset1Y + 127) & 0xFF) << 14; // 8 bit for offset1.y
sortKey |= static_cast<uint8_t>((offset1X + 127) & 0xFF) << 6; // 8 bit for offset1.x
keys[feat] = sortKey;
assert(keys[feat] >= 0);
indices[feat] = feat;
offsets1X[feat] = offset1X;
offsets1Y[feat] = offset1Y;
regions1X[feat] = region1X;
regions1Y[feat] = region1Y;
offsets2X[feat] = offset2X;
offsets2Y[feat] = offset2Y;
regions2X[feat] = region2X;
regions2Y[feat] = region2Y;
channels1[feat] = channel1;
channels2[feat] = channel2;
}
Samples<cuv::dev_memory_space> ImageFeatureEvaluation::copySamplesToDevice(
        const std::vector<const PixelInstance*>& samples, cudaStream_t stream) {
    // Ensure every referenced image is resident in the device image cache
    // first, because the sample records store image cache positions.
    imageCache.copyImages(configuration.getImageCacheSize(), samples);
    cudaSafeCall(cudaStreamSynchronize(stream));

    utils::Profile p("copySamplesToDevice");

    // Stage the per-sample fields in host memory ...
    Samples<cuv::host_memory_space> samplesOnHost(samples.size(), sampleDataAllocator);
    for (size_t sampleNr = 0; sampleNr < samples.size(); sampleNr++) {
        const PixelInstance* sample = samples[sampleNr];
        samplesOnHost.imageNumbers[sampleNr] = imageCache.getElementPos(sample->getRGBDImage());
        samplesOnHost.depths[sampleNr] = sample->getDepth().getFloatValue();
        samplesOnHost.sampleX[sampleNr] = sample->getX();
        samplesOnHost.sampleY[sampleNr] = sample->getY();
        samplesOnHost.labels[sampleNr] = sample->getLabel();
        samplesOnHost.horFlipSetting[sampleNr] = sample->getHorFlipSetting();
    }

    // ... then transfer the whole batch to the device in one go.
    utils::Timer copySamplesAssignTimer;
    Samples<cuv::dev_memory_space> samplesOnDevice(samplesOnHost, stream);
    cudaSafeCall(cudaStreamSynchronize(stream));
    return samplesOnDevice;
}
// Drops every image currently resident in the device image cache.
void clearImageCache() {
    CURFIL_INFO("clearing image cache");
    imageCache.clear();
}
// Copy constructor: copies the tree metadata and the node-data array.
// NOTE(review): cuv::ndarray copies typically share underlying storage;
// confirm whether a shallow copy of m_data is intended here.
TreeNodes::TreeNodes(const TreeNodes& other) :
m_treeId(other.getTreeId()),
m_numNodes(other.numNodes()),
m_numLabels(other.numLabels()),
m_sizePerNode(other.sizePerNode()),
m_data(other.data())
{
}
// Flattens a trained random tree into the fixed-size, texture-friendly
// layout: LAYERS_PER_TREE x NODES_PER_TREE_LAYER records of m_sizePerNode
// bytes each (a 6-field header followed by one float per class label).
// Throws if the tree has more nodes than the fixed layout can hold.
TreeNodes::TreeNodes(const boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> >& tree) :
m_treeId(tree->getTreeId()),
m_numNodes(tree->countNodes()),
m_numLabels(tree->getNumClasses()),
m_sizePerNode(offsetHistograms + sizeof(float) * m_numLabels),
m_data(LAYERS_PER_TREE * NODES_PER_TREE_LAYER, m_sizePerNode)
{
// Header is six 4-byte fields; the class histogram starts right after it.
assert(offsetHistograms == 24);
const unsigned int MAX_NODES = LAYERS_PER_TREE * NODES_PER_TREE_LAYER;
if (m_numNodes > MAX_NODES) {
throw std::runtime_error((boost::format("too many nodes in tree %d: %d (max: %d)")
% tree->getTreeId() % m_numNodes % MAX_NODES).str());
}
convert(tree);
assert(m_numLabels == tree->getHistogram().size());
}
// Writes one field of a node record into the flat node buffer.
//
// Nodes are stored layer-major: 'node' is split into a layer index and a
// position within the layer; 'offset' is the byte offset of the field
// inside the node record. Throws if the node would fall outside the
// fixed number of layers.
template<class T>
void TreeNodes::setValue(size_t node, size_t offset, const T& value) {
const size_t layer = node / NODES_PER_TREE_LAYER;
const size_t nodeOffset = node % NODES_PER_TREE_LAYER;
assert(layer * NODES_PER_TREE_LAYER + nodeOffset == node);
if (layer >= LAYERS_PER_TREE) {
throw std::runtime_error((boost::format("illegal layer: %d (node: %d)")
% layer % node).str());
}
T* ptr = reinterpret_cast<T*>(m_data.ptr()
+ layer * NODES_PER_TREE_LAYER * m_sizePerNode
+ nodeOffset * m_sizePerNode + offset);
// CURFIL_DEBUG("setting value for node " << node << " at pos (" << layer << "," << nodeOffset << "," << offset << ") to " << static_cast<double>(value));
*ptr = value;
}
// Stores the relative index of the left child; -1 marks a leaf (see convert()).
void TreeNodes::setLeftNodeOffset(size_t node, int offset) {
setValue(node, offsetLeftNode, offset);
}
// Stores the split threshold; NaN for leaf nodes (see convert()).
void TreeNodes::setThreshold(size_t node, float threshold) {
setValue(node, offsetThreshold, threshold);
}
// Stores one class-histogram entry; the histogram is a float array that
// directly follows the six-float node header.
void TreeNodes::setHistogramValue(size_t node, size_t label, float value) {
assert(offsetHistograms == 6 * sizeof(float));
setValue(node, offsetHistograms + label * sizeof(value), value);
}
// Stores the feature type, widened to a 4-byte int field.
void TreeNodes::setType(size_t node, int8_t value) {
setValue(node, offsetTypes, static_cast<int>(value));
}
// Feature params are packed as consecutive bytes: byte 0 = offset1.x.
void TreeNodes::setOffset1X(size_t node, int8_t value) {
setValue(node, offsetFeatures + 0 * sizeof(value), value);
}
// Feature param byte 1 = offset1.y.
void TreeNodes::setOffset1Y(size_t node, int8_t value) {
setValue(node, offsetFeatures + 1 * sizeof(value), value);
}
// Feature param byte 2 = region1.x.
void TreeNodes::setRegion1X(size_t node, int8_t value) {
setValue(node, offsetFeatures + 2 * sizeof(value), value);
}
// Feature param byte 3 = region1.y.
void TreeNodes::setRegion1Y(size_t node, int8_t value) {
setValue(node, offsetFeatures + 3 * sizeof(value), value);
}
// Feature param byte 4 = offset2.x.
void TreeNodes::setOffset2X(size_t node, int8_t value) {
setValue(node, offsetFeatures + 4 * sizeof(value), value);
}
// Feature param byte 5 = offset2.y.
void TreeNodes::setOffset2Y(size_t node, int8_t value) {
setValue(node, offsetFeatures + 5 * sizeof(value), value);
}
// Feature param byte 6 = region2.x.
void TreeNodes::setRegion2X(size_t node, int8_t value) {
setValue(node, offsetFeatures + 6 * sizeof(value), value);
}
// Feature param byte 7 = region2.y.
void TreeNodes::setRegion2Y(size_t node, int8_t value) {
setValue(node, offsetFeatures + 7 * sizeof(value), value);
}
// Channels are stored as a pair of uint16_t values; this writes the first.
void TreeNodes::setChannel1(size_t node, uint16_t value) {
setValue(node, offsetChannels + 0 * sizeof(value), value);
}
// Writes the second uint16_t channel of the node record.
void TreeNodes::setChannel2(size_t node, uint16_t value) {
setValue(node, offsetChannels + 1 * sizeof(value), value);
}
// Recursively serializes a (sub)tree into this node buffer. Node ids are
// numbered per forest, so a node's slot in this tree's buffer is its id
// relative to the tree's root id.
void TreeNodes::convert(const boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> >& tree) {
size_t offset = tree->getNodeId() - tree->getTreeId();
if (offset >= m_numNodes) {
throw std::runtime_error((boost::format("tree %d, illegal offset: %d (numNodes: %d)")
% tree->getTreeId() % offset % m_numNodes).str());
}
// could be limited to the leaf-node case
const cuv::ndarray<double, cuv::host_memory_space>& histogram = tree->getNormalizedHistogram();
assert(histogram.ndim() == 1);
assert(histogram.shape(0) == m_numLabels);
for (size_t label = 0; label < histogram.shape(0); label++) {
setHistogramValue(offset, label, static_cast<float>(histogram(label)));
}
if (tree->isLeaf()) {
// Leaves are marked by left-node offset -1 and a NaN threshold.
setLeftNodeOffset(offset, -1);
setThreshold(offset, std::numeric_limits<float>::quiet_NaN());
return;
}
// decision node
const ImageFeatureFunction& feature = tree->getSplit().getFeature();
setType(offset, static_cast<int8_t>(feature.getType()));
setOffset1X(offset, static_cast<int8_t>(feature.getOffset1().getX()));
setOffset1Y(offset, static_cast<int8_t>(feature.getOffset1().getY()));
setRegion1X(offset, static_cast<int8_t>(feature.getRegion1().getX()));
setRegion1Y(offset, static_cast<int8_t>(feature.getRegion1().getY()));
setOffset2X(offset, static_cast<int8_t>(feature.getOffset2().getX()));
setOffset2Y(offset, static_cast<int8_t>(feature.getOffset2().getY()));
setRegion2X(offset, static_cast<int8_t>(feature.getRegion2().getX()));
setRegion2Y(offset, static_cast<int8_t>(feature.getRegion2().getY()));
setChannel1(offset, static_cast<int8_t>(feature.getChannel1()));
setChannel2(offset, static_cast<int8_t>(feature.getChannel2()));
setThreshold(offset, tree->getSplit().getThreshold());
convert(tree->getLeft());
convert(tree->getRight());
// tree nodes must be already in breadth-first order
assert(tree->getRight()->getNodeId() == tree->getLeft()->getNodeId() + 1);
const int leftNodeOffset = tree->getLeft()->getNodeId() - tree->getNodeId();
assert(leftNodeOffset > 0);
setLeftNodeOffset(offset, leftNodeOffset);
}
// Toggles the texture-bound flag; the assert enforces strict
// bind/unbind pairing (the state must actually change).
void DeviceCache::setBound(bool bound) {
assert(bound != this->bound);
this->bound = bound;
}
// The asserts verify that the derived class already called clear() in its
// own destructor -- clear() calls virtual functions (unbind/freeArray) that
// must not run from the base destructor.
DeviceCache::~DeviceCache() {
// clear() must be called in destructor of derived class
assert(!bound);
assert(elementTimes.empty());
assert(elementIdMap.empty());
assert(currentTime == 0);
}
// True iff the element currently occupies a slot in this cache.
bool DeviceCache::containsElement(const void* element) const {
    assert(element);
    return elementIdMap.count(element) != 0;
}
// Returns the cache slot of a previously transferred element.
// Throws std::runtime_error if the element was never copied or was evicted.
size_t DeviceCache::getElementPos(const void* element) const {
    assert(element);
    const std::map<const void*, size_t>::const_iterator pos = elementIdMap.find(element);
    if (pos == elementIdMap.end()) {
        throw std::runtime_error(getElementName(element) + " not found in cache");
    }
    return pos->second;
}
// Releases device storage and forgets all cached elements.
// Order matters: the textures must be unbound before the arrays are freed.
void DeviceCache::clear() {
if (bound) {
unbind();
}
freeArray();
elementTimes.clear();
elementIdMap.clear();
currentTime = 0;
}
// Ensures every element of 'elements' is resident on the device, using an
// LRU policy over the fixed number of cache slots. Already-cached elements
// only get their timestamp refreshed; new elements either take a free slot
// or evict the least-recently-used one. Leaves the cache bound on return.
void DeviceCache::copyElements(size_t cacheSize, const std::set<const void*>& elements) {
if (elements.empty())
return;
if (elements.size() > cacheSize) {
throw std::runtime_error(boost::str(boost::format("too many images: %d. max: %d")
% elements.size()
% cacheSize));
}
// A resize invalidates all slots.
if (cacheSize != this->cacheSize) {
clear();
this->cacheSize = cacheSize;
}
// One logical tick per batch: all elements touched in this call share it.
currentTime++;
size_t numTransferred = 0;
const boost::posix_time::ptime start = boost::posix_time::microsec_clock::local_time();
std::set<const void*>::const_iterator it;
for (it = elements.begin(); it != elements.end(); it++) {
const void* element = *it;
size_t elementPos = 0;
// check if image is already there
if (elementIdMap.find(element) != elementIdMap.end()) {
elementPos = elementIdMap[element];
// update LRU vector time
elementTimes[elementPos] = currentTime;
CURFIL_DEBUG(getElementName(element) << " already in device cache");
continue;
}
// element does not exist yet. transfer it
CURFIL_DEBUG(getElementName(element) << " not yet on device. transferring");
if (elementIdMap.size() < cacheSize) {
// Free slot available: occupy the next unused position.
elementPos = elementIdMap.size();
} else {
// find least recently used element
size_t oldestTime = currentTime;
std::map<size_t, size_t>::const_iterator it;
for (it = elementTimes.begin(); it != elementTimes.end(); it++) {
if (it->second < oldestTime) {
oldestTime = it->second;
elementPos = it->first;
}
}
// Because elements.size() <= cacheSize, at least one slot still has a
// timestamp older than the current batch.
assert(oldestTime < currentTime);
CURFIL_DEBUG("replacing " << getElementName(element)
<< " (time: " << oldestTime << ", current: " << currentTime << ")");
{
// Drop the reverse mapping of the evicted element.
std::map<const void*, size_t>::iterator it;
for (it = elementIdMap.begin(); it != elementIdMap.end(); it++) {
if (it->second == elementPos) {
CURFIL_DEBUG("removing " << getElementName(it->first) << " at pos " << elementPos);
elementIdMap.erase(it);
break;
}
}
}
}
elementIdMap[element] = elementPos;
elementTimes[elementPos] = currentTime;
CURFIL_DEBUG("transfer " << getElementName(element) << " to pos " << elementPos);
// Textures must be unbound while the underlying array is written.
if (bound) {
unbind();
}
transferElement(elementPos, element, streams[0]);
numTransferred++;
}
if (numTransferred > 0) {
CURFIL_DEBUG("transferred " << numTransferred << "/" << elements.size() << " "
<< getElementsName() << " from host to device");
cudaSafeCall(cudaStreamSynchronize(streams[0]));
const boost::posix_time::ptime stop = boost::posix_time::microsec_clock::local_time();
totalTransferTimeMicroseconds += (stop - start).total_microseconds();
}
if (!bound) {
bind();
}
}
// Adopts a new cache size; any size change invalidates all slots,
// so the cache is wiped first.
void DeviceCache::updateCacheSize(size_t cacheSize) {
    if (this->cacheSize == cacheSize) {
        return;
    }
    clear();
    this->cacheSize = cacheSize;
}
// Must call clear() here (not in the base destructor) because clear()
// invokes the virtual unbind()/freeArray() overridden by this class.
ImageCache::~ImageCache() {
CURFIL_DEBUG("destroying image cache " << this);
clear();
}
// Releases both layered texture arrays; a no-op for arrays already freed.
// The textures must be unbound first (asserted).
void ImageCache::freeArray() {
    assert(!isBound());
    if (colorTextureData) {
        cudaFreeArray(colorTextureData);
        colorTextureData = NULL;
    }
    if (depthTextureData) {
        cudaFreeArray(depthTextureData);
        depthTextureData = NULL;
    }
}
// Allocates the two layered 3D arrays backing the image textures: a float
// array with colorChannels layers per cached image and a signed-int array
// with depthChannels layers per cached image. Dimensions and cache size
// must already be known.
void ImageCache::allocArray() {
assert(!isBound());
unsigned int flags = cudaArrayLayered;
assert(colorTextureData == NULL);
assert(depthTextureData == NULL);
assert(this->width > 0);
assert(this->height > 0);
assert(getCacheSize() > 0);
{
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaExtent extent = make_cudaExtent(width, height, colorChannels * getCacheSize());
cudaSafeCall(cudaMalloc3DArray(&colorTextureData, &channelDesc, extent, flags));
}
{
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindSigned);
cudaExtent extent = make_cudaExtent(width, height, depthChannels * getCacheSize());
cudaSafeCall(cudaMalloc3DArray(&depthTextureData, &channelDesc, extent, flags));
}
}
// Starts empty: dimensions are adopted from the first copied image batch
// and the texture arrays are allocated lazily (see copyImages/allocArray).
ImageCache::ImageCache() :
DeviceCache(), width(0), height(0), colorTextureData(NULL), depthTextureData(NULL) {
}
// Collects the distinct images referenced by the samples and delegates to
// the set-based overload.
void ImageCache::copyImages(size_t cacheSize, const std::vector<const PixelInstance*>& samples) {
    std::set<const RGBDImage*> images;
    for (std::vector<const PixelInstance*>::const_iterator it = samples.begin(); it != samples.end(); ++it) {
        images.insert((*it)->getRGBDImage());
    }
    copyImages(cacheSize, images);
}
// Transfers a set of images to the device cache. All images in one batch
// must share the same dimensions (asserted in debug builds); a dimension
// change relative to the previous batch wipes the cache and triggers a
// re-allocation of the texture arrays.
void ImageCache::copyImages(size_t cacheSize, const std::set<const RGBDImage*>& images) {
if (images.empty())
return;
int width = (*images.begin())->getWidth();
int height = (*images.begin())->getHeight();
#ifndef NDEBUG
{
std::set<const RGBDImage*>::const_iterator it;
for (it = images.begin(); it != images.end(); it++) {
assert(width == (*it)->getWidth());
assert(height == (*it)->getHeight());
}
}
#endif
if (width != this->width || height != this->height) {
this->width = width;
this->height = height;
clear();
}
updateCacheSize(cacheSize);
// Lazy allocation: first use, or after a clear().
if (colorTextureData == NULL) {
allocArray();
}
// Hand the type-erased pointers to the generic LRU transfer logic.
std::set<const void*> elements;
std::set<const RGBDImage*>::const_iterator it;
for (it = images.begin(); it != images.end(); it++) {
elements.insert(*it);
}
copyElements(cacheSize, elements);
}
// Asynchronously copies one RGBD image into its cache slot: the color
// planes go into the float array at layer colorChannels*imagePos, the
// depth planes into the int array at layer depthChannels*imagePos.
void ImageCache::transferElement(size_t imagePos, const void* imagePtr, cudaStream_t stream) {
const RGBDImage* image = reinterpret_cast<const RGBDImage*>(imagePtr);
struct cudaMemcpy3DParms colorCopyParams;
memset(&colorCopyParams, 0, sizeof(colorCopyParams));
colorCopyParams.extent = make_cudaExtent(width, height, colorChannels);
colorCopyParams.kind = cudaMemcpyHostToDevice;
colorCopyParams.dstArray = colorTextureData;
struct cudaMemcpy3DParms depthCopyParams;
memset(&depthCopyParams, 0, sizeof(depthCopyParams));
depthCopyParams.extent = make_cudaExtent(width, height, depthChannels);
depthCopyParams.kind = cudaMemcpyHostToDevice;
depthCopyParams.dstArray = depthTextureData;
// Host-side layout is channel-major: (channels, height, width).
assert(image->getColorImage().ndim() == 3);
assert(image->getColorImage().shape(0) == static_cast<unsigned int>(colorChannels));
assert(image->getColorImage().shape(1) == static_cast<unsigned int>(height));
assert(image->getColorImage().shape(2) == static_cast<unsigned int>(width));
colorCopyParams.dstPos = make_cudaPos(0, 0, colorChannels * imagePos);
colorCopyParams.srcPtr = make_cudaPitchedPtr(
const_cast<void*>(reinterpret_cast<const void*>(image->getColorImage().ptr())),
sizeof(float) * width, width, height);
cudaSafeCall(cudaMemcpy3DAsync(&colorCopyParams, stream));
assert(image->getDepthImage().ndim() == 3);
assert(image->getDepthImage().shape(0) == static_cast<unsigned int>(depthChannels));
assert(image->getDepthImage().shape(1) == static_cast<unsigned int>(height));
assert(image->getDepthImage().shape(2) == static_cast<unsigned int>(width));
depthCopyParams.dstPos = make_cudaPos(0, 0, depthChannels * imagePos);
depthCopyParams.srcPtr = make_cudaPitchedPtr(
const_cast<void*>(reinterpret_cast<const void*>(image->getDepthImage().ptr())),
sizeof(int) * width, width, height);
cudaSafeCall(cudaMemcpy3DAsync(&depthCopyParams, stream));
}
// Human-readable identifier ("image <address>") used in log/error messages.
std::string ImageCache::getElementName(const void* imagePtr) const {
    return boost::str(boost::format("image %p") % reinterpret_cast<const RGBDImage*>(imagePtr));
}
// Plural noun for this cache's elements, used in log messages.
std::string ImageCache::getElementsName() const {
    return "images";
}
// Binds the color and depth arrays to their layered textures with point
// sampling (no filtering) and clamped addressing, so kernels can read
// pixels via tex2DLayered.
void ImageCache::bind() {
assert(!isBound());
colorTexture.normalized = false;
colorTexture.filterMode = cudaFilterModePoint;
colorTexture.addressMode[0] = cudaAddressModeClamp;
colorTexture.addressMode[1] = cudaAddressModeClamp;
colorTexture.addressMode[2] = cudaAddressModeClamp;
assert(colorTextureData != NULL);
cudaSafeCall(cudaBindTextureToArray(colorTexture, colorTextureData));
depthTexture.normalized = false;
depthTexture.filterMode = cudaFilterModePoint;
depthTexture.addressMode[0] = cudaAddressModeClamp;
depthTexture.addressMode[1] = cudaAddressModeClamp;
depthTexture.addressMode[2] = cudaAddressModeClamp;
assert(depthTextureData != NULL);
cudaSafeCall(cudaBindTextureToArray(depthTexture, depthTextureData));
setBound(true);
}
// Detaches both textures so the underlying arrays may be rewritten or freed.
void ImageCache::unbind() {
    assert(isBound());
    cudaUnbindTexture(colorTexture);
    cudaUnbindTexture(depthTexture);
    setBound(false);
}
// Must call clear() here (not in the base destructor) because clear()
// invokes the virtual unbind()/freeArray() overridden by this class.
TreeCache::~TreeCache() {
CURFIL_DEBUG("destroying tree cache " << this);
clear();
}
// Releases the layered tree texture array, if allocated. The texture must
// be unbound first (asserted).
void TreeCache::freeArray() {
    assert(!isBound());
    if (treeTextureData) {
        cudaFreeArray(treeTextureData);
        treeTextureData = NULL;
    }
}
// Allocates the layered 3D float array backing the tree texture: one row of
// sizePerNode/4 floats per node, NODES_PER_TREE_LAYER nodes per layer, and
// LAYERS_PER_TREE layers for each of the cached trees.
void TreeCache::allocArray() {
assert(!isBound());
assert(treeTextureData == NULL);
assert(sizePerNode > 0);
assert(numLabels > 0);
assert(getCacheSize() > 0);
CURFIL_INFO("tree cache: allocating " << getCacheSize() << " x " << LAYERS_PER_TREE << " x "
<< NODES_PER_TREE_LAYER << " x " << sizePerNode << " bytes");
{
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaExtent extent = make_cudaExtent(sizePerNode / sizeof(float), NODES_PER_TREE_LAYER,
LAYERS_PER_TREE * getCacheSize());
cudaSafeCall(cudaMalloc3DArray(&treeTextureData, &channelDesc, extent, cudaArrayLayered));
}
}
// Starts empty: node size and label count are adopted from the first
// copied tree batch; the texture array is allocated lazily (see copyTrees).
TreeCache::TreeCache() :
DeviceCache(), sizePerNode(0), numLabels(0),
treeTextureData(NULL) {
}
// Convenience wrapper: transfer a single tree via copyTrees().
void TreeCache::copyTree(size_t cacheSize, const TreeNodes* tree) {
std::set<const TreeNodes*> trees;
trees.insert(tree);
copyTrees(cacheSize, trees);
}
// Transfers a set of serialized trees to the device cache. All trees in one
// batch must share node size and label count (asserted); a change relative
// to the previous batch wipes the cache and re-allocates the texture array.
void TreeCache::copyTrees(size_t cacheSize, const std::set<const TreeNodes*>& trees) {
if (trees.empty())
return;
const size_t sizePerNode = (*trees.begin())->sizePerNode();
const LabelType numLabels = (*trees.begin())->numLabels();
{
std::set<const TreeNodes*>::const_iterator it;
for (it = trees.begin(); it != trees.end(); it++) {
assert(sizePerNode == (*it)->sizePerNode());
assert(numLabels == (*it)->numLabels());
}
}
if (numLabels != this->numLabels || sizePerNode != this->sizePerNode) {
this->numLabels = numLabels;
this->sizePerNode = sizePerNode;
clear();
}
updateCacheSize(cacheSize);
// Lazy allocation: first use, or after a clear().
if (treeTextureData == NULL) {
allocArray();
}
// Hand the type-erased pointers to the generic LRU transfer logic.
std::set<const void*> elements;
std::set<const TreeNodes*>::const_iterator it;
for (it = trees.begin(); it != trees.end(); it++) {
elements.insert(*it);
}
copyElements(cacheSize, elements);
}
// Asynchronously copies one serialized tree into its cache slot. Only the
// layers actually occupied by the tree's nodes are transferred; the slot
// starts at layer elementPos * LAYERS_PER_TREE.
void TreeCache::transferElement(size_t elementPos, const void* element, cudaStream_t stream) {
assert(!isBound());
utils::Profile profile("transferTree");
const TreeNodes* tree = reinterpret_cast<const TreeNodes*>(element);
struct cudaMemcpy3DParms copyParams;
memset(&copyParams, 0, sizeof(copyParams));
copyParams.kind = cudaMemcpyHostToDevice;
copyParams.dstArray = treeTextureData;
assert(elementPos < getCacheSize());
const size_t layers = ceil(tree->numNodes() / static_cast<double>(NODES_PER_TREE_LAYER));
assert(layers >= 1);
assert(layers <= LAYERS_PER_TREE);
copyParams.dstPos = make_cudaPos(0, 0, elementPos * LAYERS_PER_TREE);
copyParams.extent = make_cudaExtent(sizePerNode / sizeof(float), NODES_PER_TREE_LAYER, layers);
void* ptr = const_cast<void*>(reinterpret_cast<const void*>(tree->data().ptr()));
CURFIL_INFO("transfer " << getElementName(element) << " to pos " << elementPos
<< " (layer " << elementPos * LAYERS_PER_TREE << ")"
<< " with " << tree->numNodes() << " nodes in " << layers << " layers");
assert(tree->data().size() == sizePerNode * NODES_PER_TREE_LAYER * LAYERS_PER_TREE);
copyParams.srcPtr = make_cudaPitchedPtr(ptr, sizePerNode, sizePerNode / sizeof(float), NODES_PER_TREE_LAYER);
cudaSafeCall(cudaMemcpy3DAsync(&copyParams, stream));
}
// Human-readable identifier ("tree <id> (<address>)") for log messages.
std::string TreeCache::getElementName(const void* element) const {
    const TreeNodes* tree = reinterpret_cast<const TreeNodes*>(element);
    return boost::str(boost::format("tree %d (%p)") % tree->getTreeId() % tree);
}
// Plural noun for this cache's elements, used in log messages.
std::string TreeCache::getElementsName() const {
    return "trees";
}
// Binds the tree array to its layered texture with point sampling and
// clamped addressing, so kernels can fetch node records via tex2DLayered.
void TreeCache::bind() {
assert(!isBound());
treeTexture.normalized = false;
treeTexture.filterMode = cudaFilterModePoint;
treeTexture.addressMode[0] = cudaAddressModeClamp;
treeTexture.addressMode[1] = cudaAddressModeClamp;
treeTexture.addressMode[2] = cudaAddressModeClamp;
assert(treeTextureData != NULL);
cudaSafeCall(cudaBindTextureToArray(treeTexture, treeTextureData));
setBound(true);
}
// Detaches the tree texture so the underlying array may be rewritten or freed.
void TreeCache::unbind() {
    assert(isBound());
    cudaUnbindTexture(treeTexture);
    setBound(false);
}
// Index into the feature-response matrix. Layout is feature-major (all
// samples of one feature contiguous); the alternative sample-major layout
// is kept as a commented-out line.
__device__
static size_t featureResponseOffset(size_t sample, size_t feature,
size_t numSamples, size_t numFeatures) {
// XXX: also need to change pointer arithmetic in aggregateHistogramsKernel
// return sample * numFeatures + feature;
return feature * numSamples + sample;
}
// Index into the split-statistics counters, laid out as
// features x thresholds x labels x 2, where 'value' (0 or 1) selects the
// left/right side of the split.
__device__
static unsigned int counterOffset(unsigned int label, unsigned int value, unsigned int threshold, unsigned int feature,
unsigned int numLabels, unsigned int numFeatures, unsigned int numThresholds) {
assert(value == 0 || value == 1);
assert(label < numLabels);
assert(feature < numFeatures);
assert(threshold < numThresholds);
// size_t index = (2 * label + value) * numFeatures * numThresholds + threshold * numFeatures + feature;
// features × thresholds × labels × 2
unsigned int index = feature * numThresholds * numLabels * 2;
index += threshold * numLabels * 2;
index += label * 2;
index += value;
return index;
}
// Position of a node within its texture layer (y coordinate of the fetch).
__device__
int getNodeOffset(int node, int tree) {
return node % NODES_PER_TREE_LAYER;
}
// Texture layer of a node: each tree occupies LAYERS_PER_TREE consecutive layers.
__device__
int getLayer(int node, int tree) {
return tree * LAYERS_PER_TREE + node / NODES_PER_TREE_LAYER;
}
// Fetches the left-child offset of a node (-1 marks a leaf).
//
// The offset is stored as raw int bits in texel 0 of the node record.
// __float_as_int() is the supported CUDA intrinsic for this bit
// reinterpretation; the previous reinterpret_cast through a float pointer
// relied on undefined type-punning behavior.
__device__
int getLeftNodeOffset(int node, int tree) {
    const float v = tex2DLayered(treeTexture, 0, getNodeOffset(node, tree), getLayer(node, tree));
    return __float_as_int(v);
}
// Fetches the feature type of a node, stored as raw int bits in texel 1.
// Uses the __float_as_int() intrinsic instead of undefined pointer
// type-punning (see getLeftNodeOffset).
__device__
int getType(int node, int tree) {
    const float v = tex2DLayered(treeTexture, 1, getNodeOffset(node, tree), getLayer(node, tree));
    return __float_as_int(v);
}
// Texel 2 packs offset1 (x,y) and region1 (x,y) of the split feature as
// four signed bytes. NOTE(review): the reinterpret_cast on a float is
// type punning (works under nvcc, but no char4 bit-cast intrinsic exists).
__device__
char4 getParam1(int node, int tree) {
float v = tex2DLayered(treeTexture, 2, getNodeOffset(node, tree), getLayer(node, tree));
return (*reinterpret_cast<char4*>(&v));
}
// Texel 3 packs offset2 (x,y) and region2 (x,y) as four signed bytes
// (same type-punning caveat as getParam1).
__device__
char4 getParam2(int node, int tree) {
float v = tex2DLayered(treeTexture, 3, getNodeOffset(node, tree), getLayer(node, tree));
return (*reinterpret_cast<char4*>(&v));
}
// Texel 4 packs the two color channels as a pair of uint16_t values
// (same type-punning caveat as getParam1).
__device__
ushort2 getChannels(int node, int tree) {
float v = tex2DLayered(treeTexture, 4, getNodeOffset(node, tree), getLayer(node, tree));
return (*reinterpret_cast<ushort2*>(&v));
}
// Texel 5 holds the split threshold (NaN for leaf nodes).
__device__
float getThreshold(int node, int tree) {
    const int layer = getLayer(node, tree);
    return tex2DLayered(treeTexture, 5, getNodeOffset(node, tree), layer);
}
// Texels 6.. hold the per-class histogram; returns the entry for 'label'.
__device__
float getHistogramValue(int label, int node, int tree) {
    const int layer = getLayer(node, tree);
    return tex2DLayered(treeTexture, 6 + label, getNodeOffset(node, tree), layer);
}
// for the unit test
// Single-thread kernel that reads back every field of one tree node via the
// texture accessors, so host-side tests can verify the serialized layout.
__global__ void fetchTreeNodeData(
int* leftNodeOffset,
int* type,
int8_t* offset1X, int8_t* offset1Y,
int8_t* region1X, int8_t* region1Y,
int8_t* offset2X, int8_t* offset2Y,
int8_t* region2X, int8_t* region2Y,
int8_t* channel1, int8_t* channel2,
float* threshold,
float* histogram,
const int node, const int tree, const int numLabels) {
// Must be launched with exactly one thread.
assert(threadIdx.x == 0);
assert(blockDim.y == 1);
assert(blockDim.x == 1);
*leftNodeOffset = getLeftNodeOffset(node, tree);
*type = getType(node, tree);
char4 param1 = getParam1(node, tree);
*offset1X = param1.x;
*offset1Y = param1.y;
*region1X = param1.z;
*region1Y = param1.w;
char4 param2 = getParam2(node, tree);
*offset2X = param2.x;
*offset2Y = param2.y;
*region2X = param2.z;
*region2Y = param2.w;
ushort2 channels = getChannels(node, tree);
*channel1 = channels.x;
*channel2 = channels.y;
*threshold = getThreshold(node, tree);
for (int label = 0; label < numLabels; label++) {
histogram[label] = getHistogramValue(label, node, tree);
}
}
// for the unit test
// Copies the tree into the device cache, fetches a single node record with
// a one-thread kernel, and returns the fields as a host-side struct.
TreeNodeData getTreeNode(const int nodeNr, const boost::shared_ptr<const TreeNodes>& treeData) {
treeCache.copyTree(3, treeData.get());
// Node ids are numbered per forest; translate to this tree's local index.
const size_t nodeOffset = nodeNr - treeData->getTreeId();
const size_t numLabels = treeData->numLabels();
// One-element device buffers the kernel writes its results into.
cuv::ndarray<int, cuv::dev_memory_space> leftNodeOffset(1);
cuv::ndarray<int, cuv::dev_memory_space> type(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset1X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset1Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region1X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region1Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset2X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> offset2Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region2X(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> region2Y(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> channel1(1);
cuv::ndarray<int8_t, cuv::dev_memory_space> channel2(1);
cuv::ndarray<float, cuv::dev_memory_space> threshold(1);
cuv::ndarray<float, cuv::dev_memory_space> histogram(numLabels);
int treeNr = treeCache.getElementPos(treeData.get());
fetchTreeNodeData<<<1,1>>>(leftNodeOffset.ptr(),
type.ptr(),
offset1X.ptr(), offset1Y.ptr(),
region1X.ptr(), region1Y.ptr(),
offset2X.ptr(), offset2Y.ptr(),
region2X.ptr(), region2Y.ptr(),
channel1.ptr(), channel2.ptr(),
threshold.ptr(), histogram.ptr(),
nodeOffset, treeNr, numLabels);
cudaSafeCall(cudaDeviceSynchronize());
TreeNodeData data;
data.leftNodeOffset = leftNodeOffset[0];
data.type = type[0];
data.offset1X = offset1X[0];
data.offset1Y = offset1Y[0];
data.region1X = region1X[0];
data.region1Y = region1Y[0];
data.offset2X = offset2X[0];
data.offset2Y = offset2Y[0];
data.region2X = region2X[0];
data.region2Y = region2Y[0];
data.channel1 = channel1[0];
data.channel2 = channel2[0];
data.threshold = threshold[0];
data.histogram = cuv::ndarray<float, cuv::host_memory_space>(numLabels);
for (size_t label = 0; label < numLabels; label++) {
data.histogram[label] = histogram(label);
}
return data;
}
// One thread per pixel: walks a single tree from the root and accumulates
// the leaf's class histogram into the per-label output planes (so repeated
// launches sum the votes of a whole forest).
__global__ void classifyKernel(
float* output, int tree,
const int16_t imageWidth, const int16_t imageHeight,
const LabelType numLabels, bool useDepthImages) {
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= imageWidth) {
return;
}
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= imageHeight) {
return;
}
float depth;
// depth might be nan here
// Depth at the pixel (1x1 region probe); without depth images a constant
// scale of 1 is used.
if (useDepthImages)
depth = averageRegionDepth(0, imageWidth, imageHeight, x, x + 1, y, y + 1);
else
depth = 1;
int currentNodeOffset = 0;
while (true) {
const int16_t leftNodeOffset = getLeftNodeOffset(currentNodeOffset, tree);
assert(leftNodeOffset == -1 || leftNodeOffset > 0);
if (leftNodeOffset < 0) {
// Leaf reached: its threshold is NaN; add the histogram and stop.
assert(isnan(getThreshold(currentNodeOffset, tree)));
for (LabelType label = 0; label < numLabels; label++) {
float v = getHistogramValue(label, currentNodeOffset, tree);
assert(!isnan(v));
assert(v >= 0.0);
output[label * imageWidth * imageHeight + y * imageWidth + x] += v;
}
// leaf node
return;
}
char4 param1 = getParam1(currentNodeOffset, tree);
int8_t offset1X = param1.x;
int8_t offset1Y = param1.y;
int8_t region1X = param1.z;
int8_t region1Y = param1.w;
char4 param2 = getParam2(currentNodeOffset, tree);
int8_t offset2X = param2.x;
int8_t offset2Y = param2.y;
int8_t region2X = param2.z;
int8_t region2Y = param2.w;
FeatureResponseType featureResponse;
// NOTE(review): no default case -- featureResponse stays uninitialized if
// a node carries an unexpected type; presumably ruled out by convert().
switch (getType(currentNodeOffset, tree)) {
case COLOR: {
ushort2 channels = getChannels(currentNodeOffset, tree);
featureResponse = calculateColorFeature(0,
imageWidth, imageHeight,
offset1X, offset1Y,
offset2X, offset2Y,
region1X, region1Y,
region2X, region2Y,
channels.x, channels.y,
x, y, depth);
}
break;
case DEPTH:
// assert(false);
featureResponse = calculateDepthFeature(0,
imageWidth, imageHeight,
offset1X, offset1Y,
offset2X, offset2Y,
region1X, region1Y,
region2X, region2Y,
x, y, depth);
break;
}
float threshold = getThreshold(currentNodeOffset, tree);
assert(!isnan(threshold));
// Descend: left child for response <= threshold, right child otherwise
// (NaN responses compare false and therefore go right).
int value = static_cast<int>(!(featureResponse <= threshold));
currentNodeOffset += leftNodeOffset + value;
}
}
// One thread per pixel (x from block/thread id, y from blockIdx.y):
// rescales the per-label scores at that pixel so they sum to one.
__global__ void normalizeProbabilitiesKernel(float* probabilities, int numLabels, int width, int height) {
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= width) {
        return;
    }
    const unsigned int y = blockIdx.y;
    assert(y < height);

    const unsigned int pixel = y * width + x;
    float sum = 0.0;
    for (int label = 0; label < numLabels; label++) {
        sum += probabilities[label * width * height + pixel];
    }
    // Pixels with no votes are left untouched to avoid dividing by zero.
    if (sum == 0) {
        return;
    }
    for (int label = 0; label < numLabels; label++) {
        probabilities[label * width * height + pixel] /= sum;
    }
}
// One thread per pixel: writes the label with the highest probability.
// Ties and all-zero pixels resolve to label 0.
__global__ void maxProbabilitiesKernel(const float* probabilities, LabelType* output, int numLabels, int width,
        int height) {
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= width) {
        return;
    }
    const unsigned int y = blockIdx.y;
    assert(y < height);

    const unsigned int pixel = y * width + x;
    LabelType bestLabel = 0;
    float bestProbability = 0.0;
    for (LabelType label = 0; label < numLabels; label++) {
        const float probability = probabilities[label * width * height + pixel];
        if (probability > bestProbability) {
            bestProbability = probability;
            bestLabel = label;
        }
    }
    output[pixel] = bestLabel;
}
// Normalizes the (labels x height x width) score volume in-place so that
// each pixel's label scores form a probability distribution.
void normalizeProbabilities(cuv::ndarray<float, cuv::dev_memory_space>& probabilities) {
    utils::Profile profileClassifyImage("normalizeProbabilities");
    cudaStream_t stream = streams[0];

    const unsigned int numLabels = probabilities.shape(0);
    const unsigned int height = probabilities.shape(1);
    const unsigned int width = probabilities.shape(2);

    // One thread per pixel in a row; one grid row per image row.
    const unsigned int threadsPerBlock = std::min(width, 128u);
    const unsigned int numBlocks = (width + threadsPerBlock - 1) / threadsPerBlock;
    dim3 threads(threadsPerBlock);
    dim3 blockSize(numBlocks, height);
    normalizeProbabilitiesKernel<<<blockSize, threads, 0, stream>>>(probabilities.ptr(), numLabels, width, height);
    cudaSafeCall(cudaStreamSynchronize(stream));
}
// Arg-max over the label axis of the (labels x height x width) probability
// volume, writing a (height x width) label image into 'output'.
void determineMaxProbabilities(const cuv::ndarray<float, cuv::dev_memory_space>& probabilities,
        cuv::ndarray<LabelType, cuv::dev_memory_space>& output) {
    utils::Profile profileClassifyImage("determineMaxProbabilities");

    const unsigned int numLabels = probabilities.shape(0);
    const unsigned int height = probabilities.shape(1);
    const unsigned int width = probabilities.shape(2);
    assert(output.shape(0) == height);
    assert(output.shape(1) == width);

    cudaStream_t stream = streams[0];

    // One thread per pixel in a row; one grid row per image row.
    const unsigned int threadsPerBlock = std::min(width, 128u);
    const unsigned int numBlocks = (width + threadsPerBlock - 1) / threadsPerBlock;
    dim3 threads(threadsPerBlock);
    dim3 blockSize(numBlocks, height);
    maxProbabilitiesKernel<<<blockSize, threads, 0, stream>>>(probabilities.ptr(), output.ptr(), numLabels, width, height);
    cudaSafeCall(cudaStreamSynchronize(stream));
}
// Classifies every pixel of one RGBD image with a single random tree, writing
// per-label probabilities into 'output' (shape: numLabels x height x width).
// Holds textureMutex while running because image and tree data are staged into
// shared device-side caches. Synchronous: returns after the kernel finished.
void classifyImage(int treeCacheSize, cuv::ndarray<float, cuv::dev_memory_space>& output, const RGBDImage& image,
        LabelType numLabels, const boost::shared_ptr<const TreeNodes>& treeData, bool useDepthImages) {
    std::set<const RGBDImage*> images;
    images.insert(&image);
    tbb::mutex::scoped_lock lock(textureMutex);
    utils::Profile profileClassifyImage("classifyImage");
    // stage the single input image into the device image cache
    imageCache.copyImages(1, images);
    cudaStream_t stream = streams[0];
    assert(output.shape(0) == numLabels);
    assert(output.shape(1) == static_cast<unsigned int>(image.getHeight()));
    assert(output.shape(2) == static_cast<unsigned int>(image.getWidth()));
    // 8x16 thread tiles; the grid is sized by ceiling division to cover the image
    const int threadsPerRow = 8;
    const int threadsPerColumn = 16;
    int blocksX = std::ceil(image.getWidth() / static_cast<float>(threadsPerRow));
    int blocksY = std::ceil(image.getHeight() / static_cast<float>(threadsPerColumn));
    dim3 threads(threadsPerRow, threadsPerColumn);
    dim3 blockSize(blocksX, blocksY);
    // upload the tree into the tree cache and resolve its slot index,
    // which the kernel uses to locate the tree data
    treeCache.copyTree(treeCacheSize, treeData.get());
    size_t tree = treeCache.getElementPos(treeData.get());
    utils::Profile profileClassifyImageKernel("classifyImageKernel");
    // classifyKernel does not use shared memory, so prefer a larger L1 cache
    cudaSafeCall(cudaFuncSetCacheConfig(classifyKernel, cudaFuncCachePreferL1));
    classifyKernel<<<blockSize, threads, 0, stream>>>(output.ptr(), tree,
            image.getWidth(), image.getHeight(),
            numLabels, useDepthImages);
    cudaSafeCall(cudaStreamSynchronize(stream));
}
// Computes the response of every (feature, sample) pair.
// Launch layout: blockIdx.x/threadIdx.y index features, blockIdx.y/threadIdx.x
// index samples. Feature parameters are stored column-wise (one array per
// attribute, indexed by feature); sample attributes likewise, indexed by sample.
// Flip handling: 'Flip' mirrors the x offsets before evaluation; 'Both'
// additionally evaluates the mirrored variant and writes it to featureResponses2
// (featureResponses2 may be null unless some sample uses 'Both').
// Results are stored at featureResponseOffset(sample, feature, ...).
__global__ void featureResponseKernel(
        FeatureResponseType* featureResponses1,
        FeatureResponseType* featureResponses2,
        const int8_t* types,
        const int16_t imageWidth, const int16_t imageHeight,
        const int8_t* offsets1X, const int8_t* offsets1Y,
        const int8_t* offsets2X, const int8_t* offsets2Y,
        const int8_t* regions1X, const int8_t* regions1Y,
        const int8_t* regions2X, const int8_t* regions2Y,
        const int8_t* channels1, const int8_t* channels2,
        const int* samplesX, const int* samplesY, const float* depths,
        const int* imageNumbers, const HorizontalFlipSetting* sampleHorFlipSetting, unsigned int numFeatures, unsigned int numSamples) {
    unsigned int feature = blockIdx.x * blockDim.y + threadIdx.y;
    unsigned int sample = blockIdx.y * blockDim.x + threadIdx.x;
    if (feature >= numFeatures || sample >= numSamples) {
        return;
    }
    int8_t type = types[feature];
    assert(type == COLOR || type == DEPTH);
    int8_t offset1X = offsets1X[feature];
    int8_t offset1Y = offsets1Y[feature];
    int8_t offset2X = offsets2X[feature];
    int8_t offset2Y = offsets2Y[feature];
    int8_t region1X = regions1X[feature];
    int8_t region1Y = regions1Y[feature];
    int8_t region2X = regions2X[feature];
    int8_t region2Y = regions2Y[feature];
    int imageNr = imageNumbers[sample];
    // initialize BOTH responses: with NDEBUG the assert(false) in the default
    // branch is a no-op, and featureResponse1 must not be written back
    // uninitialized if an unexpected feature type ever slips through
    FeatureResponseType featureResponse1 = 0;
    FeatureResponseType featureResponse2 = 0;
    HorizontalFlipSetting horFlipSetting = sampleHorFlipSetting[sample];
    if (horFlipSetting == Flip) {
        // mirror the x offsets for horizontally flipped samples
        offset1X = -offset1X;
        offset2X = -offset2X;
    }
    switch (type) {
    case COLOR:
        featureResponse1 = calculateColorFeature(imageNr,
                imageWidth, imageHeight,
                offset1X, offset1Y,
                offset2X, offset2Y,
                region1X, region1Y,
                region2X, region2Y,
                channels1[feature], channels2[feature],
                samplesX[sample], samplesY[sample], depths[sample]);
        if (horFlipSetting == Both) {
            // mirrored variant for samples evaluated in both orientations
            featureResponse2 = calculateColorFeature(imageNr,
                    imageWidth, imageHeight,
                    -offset1X, offset1Y,
                    -offset2X, offset2Y,
                    region1X, region1Y,
                    region2X, region2Y,
                    channels1[feature], channels2[feature],
                    samplesX[sample], samplesY[sample], depths[sample]);
        }
        break;
    case DEPTH:
        featureResponse1 = calculateDepthFeature(imageNr,
                imageWidth, imageHeight,
                offset1X, offset1Y,
                offset2X, offset2Y,
                region1X, region1Y,
                region2X, region2Y,
                samplesX[sample], samplesY[sample], depths[sample]);
        if (horFlipSetting == Both) {
            featureResponse2 = calculateDepthFeature(imageNr,
                    imageWidth, imageHeight,
                    -offset1X, offset1Y,
                    -offset2X, offset2Y,
                    region1X, region1Y,
                    region2X, region2Y,
                    samplesX[sample], samplesY[sample], depths[sample]);
        }
        break;
    default:
        assert(false);
        break;
    }
    featureResponses1[featureResponseOffset(sample, feature, numSamples, numFeatures)] = featureResponse1;
    if (horFlipSetting == Both) {
        featureResponses2[featureResponseOffset(sample, feature, numSamples, numFeatures)] = featureResponse2;
    }
}
// Bit trick: a power of two has exactly one set bit, so x & (x - 1) clears it
// to zero. Zero itself is explicitly excluded.
// http://stackoverflow.com/questions/600293/how-to-check-if-a-number-is-a-power-of-2
#ifndef NDEBUG
__device__
static bool isPowerOfTwo(size_t x) {
    if (x == 0) {
        return false;
    }
    return (x & (x - 1)) == 0;
}
#endif
// Computes the split score of every (feature, threshold) pair from the class
// histograms produced by aggregateHistogramsKernel.
// Launch layout: blockIdx.x/threadIdx.x index features, blockIdx.y indexes
// thresholds. 'counters' layout follows counterOffset(); 'allClasses' holds
// the per-class totals over all samples; 'scores' is numThresholds x numFeatures.
__global__ void scoreKernel(const WeightType* counters,
        const float* thresholds,
        unsigned int numThresholds,
        unsigned int numLabels,
        unsigned int numFeatures,
        const WeightType* allClasses,
        ScoreType* scores) {
    unsigned int feature = blockIdx.x * blockDim.x + threadIdx.x;
    if (feature >= numFeatures) {
        return;
    }
    unsigned int thresh = blockIdx.y;
    // total sample weight that went to the left (value 0) and right (value 1) child
    WeightType totals[2] = { 0, 0 };
    for (unsigned int label = 0; label < numLabels; label++) {
        for (unsigned int value = 0; value < 2; value++) {
            unsigned int cidx = counterOffset(label, value, thresh, feature, numLabels, numFeatures,
                    numThresholds);
            WeightType counter = counters[cidx];
            totals[value] += counter;
        }
    }
    // left/right counters of the same label are adjacent in memory, so the
    // per-label stride within each histogram is 2 (verified by the asserts)
    const WeightType* leftClasses = counters
            + counterOffset(0, 0, thresh, feature, numLabels, numFeatures, numThresholds);
    const WeightType* rightClasses = counters
            + counterOffset(0, 1, thresh, feature, numLabels, numFeatures, numThresholds);
    assert(rightClasses == leftClasses + 1);
    unsigned int leftRightStride = 2;
#ifndef NDEBUG
    // double-check the assumed stride against the actual counterOffset() layout
    unsigned int off0 = counterOffset(0, 0, thresh, feature, numLabels, numFeatures, numThresholds);
    unsigned int off1 = counterOffset(1, 0, thresh, feature, numLabels, numFeatures, numThresholds);
    assert(leftRightStride == off1 - off0);
#endif
    ScoreType score = NormalizedInformationGainScore::calculateScore(numLabels, leftClasses, rightClasses,
            leftRightStride, allClasses, static_cast<ScoreType>(totals[0]), static_cast<ScoreType>(totals[1]));
    scores[thresh * numFeatures + feature] = score;
}
// Accumulates the per-(feature, threshold) class histograms over all samples.
// Launch layout: blockIdx.x = threshold, blockIdx.y = feature; blockDim.x
// threads iterate over the samples. Each thread keeps private counters in
// shared memory (2 * numLabels * blockDim.x unsigned shorts, passed as dynamic
// shared memory), which are then tree-reduced to 2 * numLabels totals and
// added into the global 'counters' array (layout per counterOffset()).
// Preconditions: blockDim.x is a power of two, numLabels <= 32 (labelFlags is
// a 32-bit mask), blockDim.x >= 2 * numLabels for the final write-out.
__global__ void aggregateHistogramsKernel(
        const FeatureResponseType* featureResponses1,
        const FeatureResponseType* featureResponses2,
        WeightType* counters,
        const float* thresholds,
        const uint8_t* sampleLabel,
        const HorizontalFlipSetting* sampleHorFlipSetting,
        unsigned int numThresholds,
        unsigned int numLabels,
        unsigned int numFeatures,
        unsigned int numSamples
        ) {
#ifndef NDEBUG
    // per-thread counters are unsigned short: overflow-checked in debug builds
    const unsigned int COUNTER_MAX = 0xFFFF;
#endif
    // shape: 2 * numLabels * threadsPerBlock
    extern __shared__ unsigned short counterShared[];
    unsigned int feature = blockIdx.y;
    unsigned int thresh = blockIdx.x;
    assert(feature < numFeatures);
    assert(thresh < numThresholds);
    unsigned int offset = thresh * numFeatures + feature;
    const float threshold = thresholds[offset];
    // initialize shared memory
    // every thread must initialize 2*numLabels counters with zero
    for (unsigned int i = threadIdx.x; i < 2 * numLabels * blockDim.x; i += blockDim.x) {
        counterShared[i] = 0;
    }
    __syncthreads();
    // bitmask of labels this thread has actually seen; used to skip the
    // reduction for labels with no samples
    unsigned int labelFlags = 0;
    // iterate over all samples and increment the according counter in shared memory
    const FeatureResponseType* resultPtr1 = featureResponses1
            + featureResponseOffset(threadIdx.x, feature, numSamples, numFeatures);
    const FeatureResponseType* resultPtr2 = featureResponses2
            + featureResponseOffset(threadIdx.x, feature, numSamples, numFeatures);
    for (unsigned int sample = threadIdx.x; sample < numSamples; sample += blockDim.x) {
        FeatureResponseType featureResponse1 = *resultPtr1;
        resultPtr1 += blockDim.x; // need to change if featureResponseOffset calculation changes
        uint8_t label = sampleLabel[sample];
        HorizontalFlipSetting horFlipSetting = sampleHorFlipSetting[sample];
        assert(label < numLabels);
        assert(label < 32);
        labelFlags |= 1 << label;
        // value: 0 = response goes left (<= threshold), 1 = goes right
        int value = static_cast<int>(!(featureResponse1 <= threshold));
        assert(value == 0 || value == 1);
        assert(counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value] < COUNTER_MAX);
        counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value]++;
        if (horFlipSetting == Both) {
            // also count the horizontally mirrored response for this sample
            FeatureResponseType featureResponse2 = *resultPtr2;
            value = static_cast<int>(!(featureResponse2 <= threshold));
            assert(value == 0 || value == 1);
            assert(counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value] < COUNTER_MAX);
            counterShared[(2 * label) * blockDim.x + 2 * threadIdx.x + value]++;
        }
        resultPtr2 += blockDim.x; // need to change if featureResponseOffset calculation changes
        // no need to sync here because data is accessed only by the same thread in this loop
    }
    // no sync needed here because it is done in the loop over the labels
    assert(isPowerOfTwo(blockDim.x));
    // reduce the 2*labels*threads counters in shared memory to 2*labels counters
    for (uint8_t label = 0; label < numLabels; label++) {
        // skip labels without samples
        // __syncthreads_or doubles as the barrier for the previous iteration
        if (__syncthreads_or(labelFlags & (1 << label)) == 0) { //this part sometimes causes problems but it's slower without it
            if (threadIdx.x < 2) {
                counterShared[2 * label + threadIdx.x] = 0;
            }
            continue;
        }
        unsigned int idxA = (2 * label) * blockDim.x + threadIdx.x;
        // halving tree reduction down to 4 partial sums (offset stops at 4)
        for (unsigned int offset = blockDim.x; offset > 2; offset /= 2) {
            if (threadIdx.x < offset) {
                // check for counter overflow
                assert(COUNTER_MAX - counterShared[idxA] >= counterShared[idxA + offset]);
                counterShared[idxA] += counterShared[idxA + offset];
            }
            __syncthreads();
        }
        if (threadIdx.x < 2) {
            // write final result to a different (already unused) location in shared memory
            // this way, bank conflicts are avoided at the very end, when data is loaded from shared memory to write it to global memory in a coalesced manner
            counterShared[2 * label + threadIdx.x] = counterShared[idxA] + counterShared[idxA + 2];
        }
    }
    // one thread per (label, value) pair adds the block's total into global memory
    if (threadIdx.x < 2 * numLabels) {
        const unsigned int label = threadIdx.x / 2;
        const unsigned int value = threadIdx.x % 2;
        assert(threadIdx.x == 2 * label + value);
        const unsigned short count = counterShared[threadIdx.x];
        const unsigned int cidx = counterOffset(label, value, thresh, feature, numLabels, numFeatures,
                numThresholds);
        counters[cidx] += count;
    }
}
// Switches the calling thread to the GPU assigned to this tree
// (deviceIds[treeId % deviceIds.size()]) if it is not active already.
// Throws std::runtime_error if the switch does not take effect.
// Note: '&curren' in '&currentDeviceId' had been mangled into the HTML entity
// '¤' (-> '¤tDeviceId'); restored to the intended address-of expression.
void ImageFeatureEvaluation::selectDevice() {
    int currentDeviceId;
    cudaSafeCall(cudaGetDevice(&currentDeviceId));
    const std::vector<int> deviceIds = configuration.getDeviceIds();
    const int targetDeviceId = deviceIds[treeId % deviceIds.size()];
    if (currentDeviceId != targetDeviceId) {
        CURFIL_DEBUG("tree " << treeId << ": switching from device " << currentDeviceId << " to " << targetDeviceId);
        cudaSafeCall(cudaSetDevice(targetDeviceId));
        // re-read the active device to verify the switch actually happened
        cudaSafeCall(cudaGetDevice(&currentDeviceId));
        if (currentDeviceId != targetDeviceId) {
            throw std::runtime_error("failed to switch GPU device");
        }
    }
}
// Selects this tree's GPU, logs its properties and lazily creates the shared
// CUDA streams exactly once (guarded by initMutex).
// Note: '&curren' in '&currentDeviceId' had been mangled into the HTML entity
// '¤' (-> '¤tDeviceId'); restored to the intended address-of expression.
void ImageFeatureEvaluation::initDevice() {
    selectDevice();
    cudaDeviceProp prop;
    int currentDeviceId;
    cudaSafeCall(cudaGetDevice(&currentDeviceId));
    cudaSafeCall(cudaGetDeviceProperties(&prop, currentDeviceId));
    CURFIL_INFO("GPU Device " << currentDeviceId << ": " << prop.name);
    {
        // double-checked under the mutex: only the first caller creates the streams
        tbb::mutex::scoped_lock initLock(initMutex);
        if (!initialized) {
            for (int i = 0; i < NUM_STREAMS; i++) {
                cudaSafeCall(cudaStreamCreate(&streams[i]));
            }
            initialized = true;
            CURFIL_DEBUG("created " << NUM_STREAMS << " streams");
        }
    }
}
// Finalizes the batch under construction: records per-batch statistics
// (sample, image and label counts) as timer annotations on the tree node,
// appends the batch to 'batches' and clears both accumulators for reuse.
static void addBatch(RandomTree<PixelInstance, ImageFeatureFunction>& node,
        std::vector<std::vector<const PixelInstance*> >& batches,
        std::vector<const PixelInstance*>& currentBatch,
        std::set<const RGBDImage*>& imagesInCurrentBatch) {
    assert(!currentBatch.empty());
    const unsigned int batchNumber = batches.size();
    // collect the distinct labels occurring in this batch
    std::set<LabelType> distinctLabels;
    for (size_t sampleNr = 0; sampleNr < currentBatch.size(); ++sampleNr) {
        distinctLabels.insert(currentBatch[sampleNr]->getLabel());
    }
    node.setTimerAnnotation((boost::format("batch%d.numSamples") % batchNumber).str(), currentBatch.size());
    node.setTimerAnnotation((boost::format("batch%d.numImages") % batchNumber).str(), imagesInCurrentBatch.size());
    node.setTimerAnnotation((boost::format("batch%d.numLabels") % batchNumber).str(), distinctLabels.size());
    CURFIL_DEBUG((boost::format("batch%d.numSamples: %d") % batchNumber % currentBatch.size()).str());
    CURFIL_DEBUG((boost::format("batch%d.numImages: %d") % batchNumber % imagesInCurrentBatch.size()).str());
    batches.push_back(currentBatch);
    currentBatch.clear();
    imagesInCurrentBatch.clear();
}
// Orders the samples so that those whose images are already resident in the
// device image cache come first, then partitions them into batches that fit
// both the image cache size and the per-batch sample limit.
// Locks textureMutex; it stays locked on return iff keepMutexLocked is true
// (the caller is then responsible for unlocking it).
std::vector<std::vector<const PixelInstance*> > ImageFeatureEvaluation::prepare(
        const std::vector<const PixelInstance*>& hostSamples,
        RandomTree<PixelInstance, ImageFeatureFunction>& node, cuv::dev_memory_space, bool keepMutexLocked) {
    selectDevice();
    assert(hostSamples.size() > 0);
    textureMutex.lock();
    utils::Timer prepareTime;
    std::vector<const PixelInstance*> samples;
    // take samples with cached images first. then the uncached images
    for (size_t sample = 0; sample < hostSamples.size(); sample++) {
        if (imageCache.containsElement(hostSamples[sample]->getRGBDImage())) {
            samples.push_back(hostSamples[sample]);
        }
    }
    for (size_t sample = 0; sample < hostSamples.size(); sample++) {
        if (!imageCache.containsElement(hostSamples[sample]->getRGBDImage())) {
            samples.push_back(hostSamples[sample]);
        }
    }
    assert(samples.size() == hostSamples.size());
    std::vector<std::vector<const PixelInstance*> > batches;
    std::vector<const PixelInstance*> currentBatch;
    std::set<const RGBDImage*> imagesInCurrentBatch;
    for (size_t sampleNr = 0; sampleNr < samples.size(); sampleNr++) {
        const PixelInstance* sample = samples[sampleNr];
        assert(sample->getDepth().isValid());
        // close the current batch when this sample's image is new AND the
        // image cache is already full, or when the batch reached its limit
        if ((imagesInCurrentBatch.find(sample->getRGBDImage()) == imagesInCurrentBatch.end()
                && imagesInCurrentBatch.size() >= static_cast<size_t>(configuration.getImageCacheSize()))
                || currentBatch.size() == configuration.getMaxSamplesPerBatch()) {
            addBatch(node, batches, currentBatch, imagesInCurrentBatch);
        }
        imagesInCurrentBatch.insert(sample->getRGBDImage());
        currentBatch.push_back(sample);
    }
    // flush the trailing partial batch
    if (!currentBatch.empty()) {
        addBatch(node, batches, currentBatch, imagesInCurrentBatch);
    }
    assert(!batches.empty());
    // NOTE(review): image dimensions are taken from the first sample only —
    // presumably all training images share one size; confirm against callers
    imageWidth = batches[0][0]->width();
    imageHeight = batches[0][0]->height();
    node.setTimerValue("prepareBatches", prepareTime);
    if (!keepMutexLocked) {
        textureMutex.unlock();
    }
    return batches;
}
// Sorts the candidate features on the device by the precomputed sort keys in
// keysIndices (row 0: keys, row 1: the feature indices to permute by), then
// gathers every feature attribute row and every threshold row into a freshly
// allocated, sorted copy which replaces 'featuresAndThresholds'.
template<>
void ImageFeatureEvaluation::sortFeatures(
        ImageFeaturesAndThresholds<cuv::dev_memory_space>& featuresAndThresholds,
        const cuv::ndarray<int, cuv::dev_memory_space>& keysIndices) const {
    utils::Profile profile("sortFeatures");
    unsigned int numFeatures = configuration.getFeatureCount();
    ImageFeaturesAndThresholds<cuv::dev_memory_space> sortedFeaturesAndThresholds(numFeatures,
            configuration.getThresholds(), featuresAllocator);
    // wrap the two rows of keysIndices as raw thrust device pointers
    thrust::device_ptr<int> k(keysIndices[cuv::indices[0][cuv::index_range()]].ptr());
    thrust::device_ptr<int> i(keysIndices[cuv::indices[1][cuv::index_range()]].ptr());
    // sorts the keys and permutes the index row along with them
    thrust::sort_by_key(k, k + numFeatures, i);
    cuv::ndarray<int8_t, cuv::dev_memory_space> features = featuresAndThresholds.features();
    cuv::ndarray<int8_t, cuv::dev_memory_space> sortedFeatures = sortedFeaturesAndThresholds.features();
    assert(features.shape() == sortedFeatures.shape());
    // one row per feature attribute (type, offsets, regions, channels)
    const size_t dim = features.shape(0);
    assert(dim == 11);
    for (size_t d = 0; d < dim; d++) {
        thrust::device_ptr<int8_t> ptr(features[cuv::indices[d][cuv::index_range()]].ptr());
        thrust::device_ptr<int8_t> sortedPtr(sortedFeatures[cuv::indices[d][cuv::index_range()]].ptr());
        // sorted[f] = original[i[f]] for this attribute row
        thrust::gather(i, i + numFeatures, ptr, sortedPtr);
    }
    // apply the same permutation to every threshold row
    for (size_t thresh = 0; thresh < configuration.getThresholds(); thresh++) {
        thrust::device_ptr<float> thresholdsPtr(
                featuresAndThresholds.thresholds()[cuv::indices[thresh][cuv::index_range()]].ptr());
        thrust::device_ptr<float> sortedThresholdsPtr(
                sortedFeaturesAndThresholds.thresholds()[cuv::indices[thresh][cuv::index_range()]].ptr());
        thrust::gather(i, i + numFeatures, thresholdsPtr, sortedThresholdsPtr);
    }
    featuresAndThresholds = sortedFeaturesAndThresholds;
}
// Generates numFeatures random feature candidates (plus numThresholds
// thresholds each) on the device, seeded deterministically with 'seed'.
// The samples are copied to the device so the kernel can draw thresholds from
// actual feature responses. If 'sort' is set, the features are additionally
// sorted by the sort keys the kernel produced (improves cache locality in the
// later evaluation). Holds textureMutex for the whole call.
ImageFeaturesAndThresholds<cuv::dev_memory_space> ImageFeatureEvaluation::generateRandomFeatures(
        const std::vector<const PixelInstance*>& samples, int seed, const bool sort, cuv::dev_memory_space) {
    unsigned int numFeatures = configuration.getFeatureCount();
    unsigned int numThresholds = configuration.getThresholds();
    tbb::mutex::scoped_lock textureLock(textureMutex);
    Samples<cuv::dev_memory_space> samplesOnDevice = copySamplesToDevice(samples, streams[0]);
    ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(numFeatures, numThresholds,
            featuresAllocator);
    // row 0: sort keys, row 1: feature indices (consumed by sortFeatures)
    cuv::ndarray<int, cuv::dev_memory_space> keysIndices(2, numFeatures, keysIndicesAllocator);
    int threadsPerBlock = std::min(numFeatures, 128u);
    int blocks = std::ceil(numFeatures / static_cast<float>(threadsPerBlock));
    const size_t numSamples = samplesOnDevice.data.shape(1);
    assert(numSamples == samples.size());
    {
        // kernel uses no shared memory; favor L1 cache
        cudaSafeCall(cudaFuncSetCacheConfig(generateRandomFeaturesKernel, cudaFuncCachePreferL1));
        utils::Profile profile("generateRandomFeatures");
        generateRandomFeaturesKernel<<<blocks, threadsPerBlock, 0, streams[0]>>>(seed,
                numFeatures,
                keysIndices[cuv::indices[0][cuv::index_range()]].ptr(),
                keysIndices[cuv::indices[1][cuv::index_range()]].ptr(),
                configuration.getBoxRadius(), configuration.getRegionSize(),
                featuresAndThresholds.types().ptr(),
                featuresAndThresholds.offset1X().ptr(), featuresAndThresholds.offset1Y().ptr(),
                featuresAndThresholds.region1X().ptr(), featuresAndThresholds.region1Y().ptr(),
                featuresAndThresholds.offset2X().ptr(), featuresAndThresholds.offset2Y().ptr(),
                featuresAndThresholds.region2X().ptr(), featuresAndThresholds.region2Y().ptr(),
                featuresAndThresholds.channel1().ptr(), featuresAndThresholds.channel2().ptr(),
                featuresAndThresholds.thresholds().ptr(),
                numThresholds,
                numSamples,
                imageWidth, imageHeight,
                samplesOnDevice.imageNumbers,
                samplesOnDevice.depths,
                samplesOnDevice.sampleX,
                samplesOnDevice.sampleY,
                samplesOnDevice.labels,
                configuration.isUseDepthImages()
                );
        // only pay for the synchronization when the profile timing is recorded
        if (profile.isEnabled()) {
            cudaSafeCall(cudaStreamSynchronize(streams[0]));
        }
    }
    if (sort) {
        sortFeatures(featuresAndThresholds, keysIndices);
    }
    cudaSafeCall(cudaStreamSynchronize(streams[0]));
    return featuresAndThresholds;
}
// For every batch: copies the samples to the device, evaluates all feature
// responses (featureResponseKernel on streams[0]), then accumulates the
// per-(feature, threshold) class histograms (aggregateHistogramsKernel on
// streams[1]). Returns the counters array with shape
// numFeatures x numThresholds x numLabels x 2 (see counterOffset()).
// If featureResponsesHost is non-null, the (unflipped) responses of all
// batches are additionally collected there.
// textureMutex is expected to be held on entry for the first batch (locked by
// prepare()); it is re-locked per batch and released after the responses are
// computed so other trees can use the texture caches during histogramming.
template<>
cuv::ndarray<WeightType, cuv::dev_memory_space> ImageFeatureEvaluation::calculateFeatureResponsesAndHistograms(
        RandomTree<PixelInstance, ImageFeatureFunction>& node,
        const std::vector<std::vector<const PixelInstance*> >& batches,
        const ImageFeaturesAndThresholds<cuv::dev_memory_space>& featuresAndThresholds,
        cuv::ndarray<FeatureResponseType, cuv::host_memory_space>* featureResponsesHost) {
    unsigned int numFeatures = configuration.getFeatureCount();
    unsigned int numThresholds = configuration.getThresholds();
    const size_t numLabels = node.getNumClasses();
#ifndef NDEBUG
    {
        // debug check: no sample may carry a label outside [0, numLabels)
        size_t numLabelsCheck = 0;
        for (size_t batch = 0; batch < batches.size(); batch++) {
            for (size_t sample = 0; sample < batches[batch].size(); sample++) {
                numLabelsCheck = std::max(numLabelsCheck,
                        static_cast<size_t>(batches[batch][sample]->getLabel() + 1));
            }
        }
        if (numLabelsCheck > numLabels) {
            CURFIL_DEBUG("numLabelsCheck: " << numLabelsCheck);
            CURFIL_DEBUG("numLabels: " << numLabels);
            assert(false);
        }
    }
#endif
    // see function counterOffset()
    // features × threshold × labels × 2
    std::vector<unsigned int> shape;
    shape.push_back(numFeatures);
    shape.push_back(numThresholds);
    shape.push_back(numLabels);
    shape.push_back(2);
    cuv::ndarray<WeightType, cuv::dev_memory_space> counters(shape, countersAllocator);
    // counters are accumulated across batches, so zero them once up-front
    cudaSafeCall(cudaMemsetAsync(counters.ptr(), 0,
            static_cast<size_t>(counters.size() * sizeof(WeightType)), streams[0]));
    assert(numFeatures == configuration.getFeatureCount());
    cuv::ndarray<FeatureResponseType, cuv::dev_memory_space> featureResponsesDevice1(numFeatures,
            configuration.getMaxSamplesPerBatch(), featureResponsesAllocator);
    // second response buffer is only allocated when horizontal flipping is on
    cuv::ndarray<FeatureResponseType, cuv::dev_memory_space> featureResponsesDevice2(featureResponsesAllocator);
    if (featureResponsesHost) {
        size_t totalSamples = 0;
        for (size_t batch = 0; batch < batches.size(); batch++) {
            totalSamples += batches[batch].size();
        }
        featureResponsesHost->resize(numFeatures, totalSamples);
    }
    // stays null unless horizontal flipping requires the second buffer
    FeatureResponseType* featureResponses2ptr;
    featureResponses2ptr = 0;
    size_t samplesProcessed = 0;
    {
        for (size_t batch = 0; batch < batches.size(); batch++) {
            const std::vector<const PixelInstance*>& currentBatch = batches[batch];
            unsigned int batchSize = currentBatch.size();
            // the mutex is already held for batch 0 (locked by prepare())
            if (batch > 0) {
                textureMutex.lock();
            }
            Samples<cuv::dev_memory_space> sampleData = copySamplesToDevice(currentBatch, streams[0]);
            featureResponsesDevice1.resize(numFeatures, batchSize);
            if (configuration.doHorizontalFlipping()) {
                featureResponsesDevice2.resize(numFeatures, batchSize);
                featureResponses2ptr = featureResponsesDevice2.ptr();
            }
            // 2D launch: x covers samples, y covers features
            unsigned int featuresPerBlock = std::min(numFeatures, 32u);
            unsigned int samplesPerBlock = std::min(batchSize, 4u);
            int featureBlocks = std::ceil(numFeatures / static_cast<float>(featuresPerBlock));
            int sampleBlocks = std::ceil(batchSize / static_cast<float>(samplesPerBlock));
            dim3 blockSize(featureBlocks, sampleBlocks);
            dim3 threads(samplesPerBlock, featuresPerBlock);
            CURFIL_DEBUG("feature response kernel: launching " << blockSize.x << "x" <<blockSize.y
                    << " blocks with " << threads.x << "x" << threads.y << " threads");
            cudaSafeCall(cudaStreamSynchronize(streams[0]));
            utils::Timer featureResponseTimer;
            {
                cudaSafeCall(cudaFuncSetCacheConfig(featureResponseKernel, cudaFuncCachePreferL1));
                utils::Profile profile("calculate feature responses");
                featureResponseKernel<<<blockSize, threads, 0, streams[0]>>>(
                        featureResponsesDevice1.ptr(),
                        featureResponses2ptr,
                        featuresAndThresholds.types().ptr(),
                        imageWidth, imageHeight,
                        featuresAndThresholds.offset1X().ptr(), featuresAndThresholds.offset1Y().ptr(),
                        featuresAndThresholds.offset2X().ptr(), featuresAndThresholds.offset2Y().ptr(),
                        featuresAndThresholds.region1X().ptr(), featuresAndThresholds.region1Y().ptr(),
                        featuresAndThresholds.region2X().ptr(), featuresAndThresholds.region2Y().ptr(),
                        featuresAndThresholds.channel1().ptr(), featuresAndThresholds.channel2().ptr(),
                        sampleData.sampleX, sampleData.sampleY, sampleData.depths, sampleData.imageNumbers, sampleData.horFlipSetting,
                        numFeatures,
                        batchSize
                        );
                if (profile.isEnabled()) {
                    cudaSafeCall(cudaStreamSynchronize(streams[0]));
                }
            }
            cudaSafeCall(cudaStreamSynchronize(streams[0]));
            //please note that featureResponsesDevice2 was not added
            if (featureResponsesHost) {
                // append feature responses on device to the feature responses for our caller
                (*featureResponsesHost)[cuv::indices[cuv::index_range()][cuv::index_range(samplesProcessed,
                        samplesProcessed + batchSize)]] = featureResponsesDevice1;
            }
            node.addTimerValue("featureResponse", featureResponseTimer);
            node.setTimerValue((boost::format("batch%d.featureResponse") % batch).str(), featureResponseTimer);
            // texture caches are no longer needed once the responses exist
            textureMutex.unlock();
            utils::Timer aggregateHistogramsTimer;
            assert(numLabels > 0);
            {
                // smaller blocks for small batches to keep more blocks in flight
                int threadsPerBlock = 128;
                if (batchSize <= 3000) {
                    threadsPerBlock = 64;
                }
                dim3 blockSize(numThresholds, numFeatures);
                dim3 threads(threadsPerBlock);
                utils::Profile profile((boost::format("aggregate histograms (%d samples)") % batchSize).str());
                // dynamic shared memory: 2 counters per label per thread
                unsigned int sharedMemory = sizeof(unsigned short) * 2 * numLabels * threadsPerBlock;
                cudaSafeCall(cudaFuncSetCacheConfig(aggregateHistogramsKernel, cudaFuncCachePreferShared));
                aggregateHistogramsKernel<<<blockSize, threads, sharedMemory, streams[1]>>>(
                        featureResponsesDevice1.ptr(),
                        featureResponses2ptr,
                        counters.ptr(),
                        featuresAndThresholds.thresholds().ptr(),
                        sampleData.labels,
                        sampleData.horFlipSetting,
                        numThresholds,
                        numLabels,
                        numFeatures,
                        batchSize
                        );
                if (profile.isEnabled()) {
                    cudaSafeCall(cudaStreamSynchronize(streams[1]));
                }
            }
            cudaSafeCall(cudaStreamSynchronize(streams[1]));
            node.addTimerValue("aggregateHistograms", aggregateHistogramsTimer);
            node.setTimerValue((boost::format("batch%d.aggregateHistograms") % batch).str(),
                    aggregateHistogramsTimer);
            samplesProcessed += batchSize;
        }
    }
    return counters;
}
// Launches scoreKernel to compute the split score of every
// (threshold, feature) pair from the accumulated class counters, then copies
// the numThresholds x numFeatures score matrix back to host memory.
// 'histogram' holds the per-class totals over all samples; its size defines
// numLabels. Synchronous: returns only after the device-to-host copy finished.
template<>
cuv::ndarray<ScoreType, cuv::host_memory_space> ImageFeatureEvaluation::calculateScores(
        const cuv::ndarray<WeightType, cuv::dev_memory_space>& counters,
        const ImageFeaturesAndThresholds<cuv::dev_memory_space>& featuresAndThresholds,
        const cuv::ndarray<WeightType, cuv::dev_memory_space>& histogram) {
    const unsigned int numFeatures = configuration.getFeatureCount();
    const unsigned int numThresholds = configuration.getThresholds();
    cuv::ndarray<ScoreType, cuv::dev_memory_space> scores(numThresholds, numFeatures, scoresAllocator);
    const size_t numLabels = histogram.size();
    assert(counters.shape(2) == numLabels);
    assert(numLabels > 0);
    {
        int threadsPerBlock = std::min(numFeatures, 128u);
        int blocks = std::ceil(numFeatures / static_cast<float>(threadsPerBlock));
        dim3 threads(threadsPerBlock);
        // grid.y enumerates the thresholds
        dim3 blockSize(blocks, numThresholds);
        utils::Profile profile("score kernel");
        // no shared memory in scoreKernel; favor L1 cache
        cudaSafeCall(cudaFuncSetCacheConfig(scoreKernel, cudaFuncCachePreferL1));
        scoreKernel<<<blockSize, threads, 0, streams[1]>>>(
                counters.ptr(),
                featuresAndThresholds.thresholds().ptr(),
                numThresholds,
                numLabels,
                numFeatures,
                histogram.ptr(),
                scores.ptr()
                );
        if (profile.isEnabled()) {
            cudaSafeCall(cudaStreamSynchronize(streams[1]));
        }
    }
    // asynchronous copy on streams[1], completed by the synchronize below
    cuv::ndarray<ScoreType, cuv::host_memory_space> scoresCPU(scores, streams[1]);
    cudaSafeCall(cudaStreamSynchronize(streams[1]));
    return scoresCPU;
}
// Converts a trained random tree into the flat TreeNodes representation that
// the GPU classification kernel consumes.
boost::shared_ptr<const TreeNodes> convertTree(
        const boost::shared_ptr<const RandomTreeImage>& randomTreeImage) {
    const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >& tree =
            randomTreeImage->getTree();
    utils::Profile profile("convertTree");
    // build the flat layout, then hand out a shared immutable copy
    TreeNodes flatTree(tree);
    return boost::make_shared<const TreeNodes>(flatTree);
}
}
|
3cabbac860f8637f55035ae8f65efca75c889b02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1024
#define PI 3.1415926535897932f
// Writes a 2D sine ripple into the green channel of an RGBA bitmap.
// Each 16x16 thread block computes its tile of intensities in shared memory,
// then every thread reads the entry mirrored within the tile.
__global__ void kernel(unsigned char* ptr)
{
    const float period = 128.0f;
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int pixel = gx + gy * blockDim.x * gridDim.x;
    __shared__ float shared[16][16];
    shared[threadIdx.x][threadIdx.y] =
            255 * (sinf(gx * 2.0f * PI / period) + 1.0f)
                * (sinf(gy * 2.0f * PI / period) + 1.0f) / 4.0f;
    // every thread must finish writing before any thread reads a mirrored entry
    __syncthreads();
    ptr[pixel * 4 + 0] = 0;
    ptr[pixel * 4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y];
    ptr[pixel * 4 + 2] = 0;
    ptr[pixel * 4 + 3] = 255;
}
// Renders the ripple pattern into a DIM x DIM bitmap on the GPU and displays it.
int main()
{
    CPUBitmap bitmap(DIM, DIM);
    unsigned char* devPixels;
    HANDLE_ERROR(hipMalloc((void**)&devPixels, bitmap.image_size()));
    // one 16x16 thread block per 16x16 pixel tile
    dim3 grids(DIM / 16, DIM / 16);
    dim3 threads(16, 16);
    hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, devPixels);
    HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), devPixels, bitmap.image_size(), hipMemcpyDeviceToHost));
    bitmap.display_and_exit();
    HANDLE_ERROR(hipFree(devPixels));
    return 0;
}
| 3cabbac860f8637f55035ae8f65efca75c889b02.cu | #include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1024
#define PI 3.1415926535897932f
// Writes a 2D sine ripple into the green channel of an RGBA bitmap.
// Each 16x16 thread block computes its tile of intensities in shared memory,
// then every thread reads the entry mirrored within the tile.
__global__ void kernel(unsigned char* ptr)
{
    const float period = 128.0f;
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int pixel = gx + gy * blockDim.x * gridDim.x;
    __shared__ float shared[16][16];
    shared[threadIdx.x][threadIdx.y] =
            255 * (sinf(gx * 2.0f * PI / period) + 1.0f)
                * (sinf(gy * 2.0f * PI / period) + 1.0f) / 4.0f;
    // every thread must finish writing before any thread reads a mirrored entry
    __syncthreads();
    ptr[pixel * 4 + 0] = 0;
    ptr[pixel * 4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y];
    ptr[pixel * 4 + 2] = 0;
    ptr[pixel * 4 + 3] = 255;
}
// Renders the ripple pattern into a DIM x DIM bitmap on the GPU and displays it.
int main()
{
    CPUBitmap bitmap(DIM, DIM);
    unsigned char* devPixels;
    HANDLE_ERROR(cudaMalloc((void**)&devPixels, bitmap.image_size()));
    // one 16x16 thread block per 16x16 pixel tile
    dim3 grids(DIM / 16, DIM / 16);
    dim3 threads(16, 16);
    kernel<<<grids, threads>>>(devPixels);
    HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), devPixels, bitmap.image_size(), cudaMemcpyDeviceToHost));
    bitmap.display_and_exit();
    HANDLE_ERROR(cudaFree(devPixels));
    return 0;
}
|
c1214c6036a90cc35abcc306ae68dff72abd3413.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_a;
int xdim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_a;
int ydim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_a;
int xdim1_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_a;
int ydim1_update_halo_kernel4_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_4_a * (y) + \
xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_4_a * (y) + \
xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a * \
(z))
// user function (auto-generated by ops.py — keep logic in sync with the generator)
// Fills the halo point at the current grid location by copying the value from
// the point 4 cells away in +x (see the OPS_ACC macros above), independently
// for vol_flux_y and mass_flux_y, gated by the corresponding fields[] flag.
__device__
inline void
update_halo_kernel4_plus_4_a(double *vol_flux_y, double *mass_flux_y,
                             const int *fields) {
  if (fields[FIELD_VOL_FLUX_Y] == 1)
    vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(4, 0, 0)];
  if (fields[FIELD_MASS_FLUX_Y] == 1)
    mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// CUDA wrapper (auto-generated by ops.py): one thread per grid point of the
// size0 x size1 x size2 iteration range. Advances each data pointer to this
// thread's element using the x/y dimensions held in constant memory, then
// calls the user function for in-range threads only.
__global__ void ops_update_halo_kernel4_plus_4_a(double *__restrict arg0,
                                                 double *__restrict arg1,
                                                 const int *__restrict arg2,
                                                 int size0, int size1,
                                                 int size2) {
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // flat offset: x + y * xdim + z * xdim * ydim (unit stride in every dimension)
  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a +
          idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a *
              ydim0_update_halo_kernel4_plus_4_a;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a +
          idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a *
              ydim1_update_halo_kernel4_plus_4_a;
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel4_plus_4_a(arg0, arg1, arg2);
  }
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 120))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(120, "update_halo_kernel4_plus_4_a");
OPS_kernels[120].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h ||
ydim0 != ydim0_update_halo_kernel4_plus_4_a_h ||
xdim1 != xdim1_update_halo_kernel4_plus_4_a_h ||
ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_4_a_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_4_a_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_4_a_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[120].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_4_a), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[120].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[120].mpi_time += t2 - t1;
OPS_kernels[120].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[120].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| c1214c6036a90cc35abcc306ae68dff72abd3413.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_a;
int xdim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_a;
int ydim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_a;
int xdim1_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_a;
int ydim1_update_halo_kernel4_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_4_a * (y) + \
xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_4_a * (y) + \
xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_4_a(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(4, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_4_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a *
ydim0_update_halo_kernel4_plus_4_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a *
ydim1_update_halo_kernel4_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_4_a(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 120))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(120, "update_halo_kernel4_plus_4_a");
OPS_kernels[120].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h ||
ydim0 != ydim0_update_halo_kernel4_plus_4_a_h ||
xdim1 != xdim1_update_halo_kernel4_plus_4_a_h ||
ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_4_a_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_4_a_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_4_a_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[120].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_4_a<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[120].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[120].mpi_time += t2 - t1;
OPS_kernels[120].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[120].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
df557c9e93c32b62cdb3e98e37f47c04128b2dac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14) {
for (int i=0; i < var_1; ++i) {
comp = -1.6431E-30f * +1.4697E36f;
comp = atan2f(var_2 / (var_3 + +1.8651E35f), sinf((var_4 / powf((-1.9645E-26f + var_5), (var_6 / var_7 + var_8 + +0.0f)))));
if (comp < sinhf(var_9 * +1.4648E-35f - var_10 / +1.0469E36f + var_11 - var_12)) {
comp += (var_13 - (-1.4317E-37f + var_14));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15);
hipDeviceSynchronize();
return 0;
}
| df557c9e93c32b62cdb3e98e37f47c04128b2dac.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14) {
for (int i=0; i < var_1; ++i) {
comp = -1.6431E-30f * +1.4697E36f;
comp = atan2f(var_2 / (var_3 + +1.8651E35f), sinf((var_4 / powf((-1.9645E-26f + var_5), (var_6 / var_7 + var_8 + +0.0f)))));
if (comp < sinhf(var_9 * +1.4648E-35f - var_10 / +1.0469E36f + var_11 - var_12)) {
comp += (var_13 - (-1.4317E-37f + var_14));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15);
cudaDeviceSynchronize();
return 0;
}
|
44451fd5fb5c9a4ad146dab898f8a8a0a2cc0716.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/native/hip/PersistentSoftmax.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#ifdef USE_FLASH_ATTENTION
#include <ATen/native/transformers/hip/flash_attn/fmha_api.h>
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h>
#endif
#include <ATen/native/transformers/hip/sdp_utils.h>
namespace at {
namespace native {
namespace {
#define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \
{ \
if (VALUE_HEAD_DIM <= 64) { \
constexpr bool kIs64x64 = true; \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kIs64x64 = false; \
if (VALUE_HEAD_DIM <= 128) { \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kSingleValueIteration = false; \
FN(); \
} \
} \
}
#define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \
{ \
hipDeviceProp_t* properties = \
at::cuda::getDeviceProperties(QUERY.device().index()); \
const int computeCapability = properties->major * 10 + properties->minor; \
DISPATCH_BLOCKSIZE( \
VALUE.size(-1), ([&]() { \
static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \
static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \
DISPATCH_TYPES( \
QUERY, ([&]() { \
DISPATCH_ARCHTAG( \
computeCapability, ([&]() { \
using AlignedAK = AttentionKernel< \
scalar_t, \
ArchTag, \
true, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
/* Run a more efficient kernel (with `isAligned=True`) \
if memory is correctly aligned*/ \
bool isAligned = \
(QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \
KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \
VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \
/* TODO: Should we warn or log somewhere when we use a \
less efficient kernel due to wrong alignment? */ \
DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \
using Kernel = AttentionKernel< \
scalar_t, \
ArchTag, \
kIsAligned, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
FUNC(); \
})) \
})) \
})); \
})); \
}
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
if (loop_offset < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1);
auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1);
return (sizes_dim1 * sizes_dim2).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
const Tensor& qkv,
const Tensor& qkv_bias,
const int64_t num_head) {
auto B = qkv.is_nested()
? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0)
: qkv.size(0);
// TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
// this too
auto T = qkv.is_nested()
? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
: qkv.size(1);
if (qkv.is_nested()) {
// Don't mess with non-nested case for now since it's not set up to fiddle
// with mask size.
// Round T up to next multiple of 8 so as to be able to utilize Tensor
// cores. Otherwise, sometimes with padding, *no* row will have the maximum
// sequence length and so we'll have a non-divisible-by-8 dimension even if
// the model author chose a multiple of 8.
T = T + (8 - (T % 8)) % 8;
}
auto _3D = qkv_bias.size(0);
auto D = _3D / 3;
TORCH_CHECK(D % num_head == 0);
const auto dim_per_head = D / num_head;
auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel< \
scalar_t, \
accscalar_t, \
assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
nt_qkv_buffer \
.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
offsets_ptr, \
sizes_ptr, \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
qkv.scalar_type(),
"transform_bias_rescale_qkv",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
auto threads = ::max(
std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
auto blocks = B * T;
const bool aligned =
((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0);
if (aligned) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
D % TRANSFORM_BIAS_RESCALE_VEC == 0,
"D = num_heads * dim_per_head, so we should have dim_per_head % "
"TRANSFORM_BIAS_RESCALE_VEC == 0 => "
"D % TRANSFORM_BIAS_RESCALE_VEC == 0");
}
if (qkv.is_nested()) {
auto* nt_qkv = get_nested_tensor_impl(qkv);
const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor());
auto offsets =
NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
.copy_(sizes.reshape({-1}));
auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
const auto offsets_ptr = metadata.data_ptr<int>();
const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
const auto input_dim = sizes.sizes()[1];
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
if (aligned &&
((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
CALL_ADD_PADDING_KERNEL(true);
} else {
CALL_ADD_PADDING_KERNEL(false);
}
} else if (aligned) {
CALL_KERNEL(true);
} else {
CALL_KERNEL(false);
}
C10_HIP_KERNEL_LAUNCH_CHECK();
});
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
auto q_k_v_s =
at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> flash_attention_helper_dense_unpacked(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool need_atten_weights,
bool is_causal) {
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t head_dim = query.size(3);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor cumulative_sequence_length_q = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_q,
max_seqlen_batch_q,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
Tensor cumulative_sequence_length_k = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_k,
max_seqlen_batch_k,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor attention =
at::_flash_scaled_dot_product_attention(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal);
// Reshape output to convert nnz to batch_size and seq_len
attention =
attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
return std::tuple<Tensor, Tensor>(attention, Tensor());
}
std::tuple<Tensor, Tensor> mem_eff_helper(
const Tensor& query,
const Tensor& key,
const Tensor& value){
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor attention = std::get<0>(at::_efficient_attention_forward(
q_t,
k_t,
v_t,
c10::nullopt,
c10::nullopt,
c10::nullopt,
false,
false)).transpose(1,2);
return std::make_tuple(attention, Tensor());
}
std::tuple<Tensor, Tensor> _scaled_dot_product_attention_forward_cuda(
    const Tensor& query_, const Tensor& key, const Tensor& value,
    const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal) {
  // Ask the backend selector which fused implementation can serve this
  // input configuration, then forward to the matching helper.
  sdp::sdp_params params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
  const auto backend = select_sdp_backend(params);
  if (backend == sdp::SDPBackend::flash_attention) {
    return flash_attention_helper_dense_unpacked(
        query_, key, value, dropout_p, need_attn_weights, is_causal);
  }
  if (backend == sdp::SDPBackend::efficient_attention) {
    return mem_eff_helper(query_, key, value);
  }
  if (backend == sdp::SDPBackend::math) {
    // Composite (unfused) fallback implemented with regular ATen ops.
    return at::_scaled_dot_product_attention_math(
        query_, key, value, attn_mask_, dropout_p, need_attn_weights, is_causal);
  }
  TORCH_CHECK(false, "No viable backend for scaled_dot_product_attention was found.");
  return std::make_tuple(Tensor(), Tensor());
}
std::tuple<Tensor, Tensor> flash_scaled_dot_product_attention(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const Tensor& cumulative_sequence_length_q,
    const Tensor& cumulative_sequence_length_k,
    const int64_t max_seqlen_batch_q,
    const int64_t max_seqlen_batch_k,
    double dropout_p,
    bool need_attn_weights,
    bool is_causal) {
#if defined(USE_FLASH_ATTENTION)
  // Standard scaled dot-product attention scaling: head_dim^(-1/2).
  const auto softmax_scale = ::pow(query.size(-1), -0.5);
  std::vector<Tensor> results = fmha::mha_fwd(
      query,
      key,
      value,
      cumulative_sequence_length_q,
      cumulative_sequence_length_k,
      max_seqlen_batch_q,
      max_seqlen_batch_k,
      dropout_p,
      softmax_scale,
      false,
      is_causal,
      need_attn_weights,
      c10::nullopt);
  // results[0] is the attention output; results[2] is returned as the
  // attention-weights slot only when the caller asked for it.
  if (need_attn_weights) {
    return std::make_tuple(results[0], results[2]);
  }
  return std::make_tuple(results[0], Tensor{});
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor{}, Tensor{});
}
// Memory-efficient attention forward (xformers-style fused kernel), HIP build.
// Returns (output [b, seqlen, num_heads, Kv], logsumexp [b, num_heads, M'])
// where the logsumexp tensor has a zero-sized last dim unless
// compute_logsumexp is set.
//
// FIX: the kernel is now launched on the current HIP stream instead of the
// default stream. `stream` was previously computed but never used, which
// broke stream ordering with the rest of the framework.
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
    const at::Tensor& query, // [b, seqlen, num_heads, K]
    const at::Tensor& key, // [b, seqlen, num_heads, K]
    const at::Tensor& value, // [b, seqlen, num_heads, Kv]
    // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
    // position of the first query token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_q,
    // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
    // position of the first key token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_k,
    // (Mode 1MHK only) Maximum sequence length across batches
    const c10::optional<int64_t> max_seqlen_q_,
    bool compute_logsumexp,
    bool causal) {
#if defined(USE_FLASH_ATTENTION)
  // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
  // machine that is >= 5.0. In practice, this is not a problem but since
  // this would avoid runtime architecture checks, we should look into it
  TORCH_CHECK(query.dim() == 4);
  TORCH_CHECK(key.dim() == 4);
  TORCH_CHECK(value.dim() == 4);
  // Batch sizes
  TORCH_CHECK(query.size(0) == key.size(0));
  TORCH_CHECK(query.size(0) == value.size(0));
  // Sequence length
  TORCH_CHECK(key.size(1) == value.size(1));
  // Num heads
  TORCH_CHECK(query.size(2) == key.size(2));
  TORCH_CHECK(query.size(2) == value.size(2));
  // Embedding per head
  TORCH_CHECK(query.size(3) == key.size(3));
  int64_t max_seqlen_q = 0, max_seqlen_k = 0;
  TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
  if (cu_seqlens_q.has_value()) {
    // Variable-sequence-length ("1MHK") mode: lengths come from the
    // cumulative-sequence tensors and the batch dim must be folded to 1.
    TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
    TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
    TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
    TORCH_CHECK(max_seqlen_q_.has_value());
    max_seqlen_q = *max_seqlen_q_;
    max_seqlen_k = 0; // Will be set inside the kernel
  } else {
    max_seqlen_q = query.size(1);
    max_seqlen_k = key.size(1);
  }
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  int64_t B = query.size(0);
  int64_t M = query.size(1);
  int64_t num_heads = query.size(-2);
  int64_t Kv = value.size(-1);
  at::Tensor res;
  at::Tensor logsumexp;
  // Instantiated once per concrete Kernel type by DISPATCH_KERNEL below.
  auto launchKernel = [&](auto _k, int computeCapability) {
    using Kernel = decltype(_k);
    using scalar_t = typename Kernel::scalar_t;
    (void)_k;
    res = at::empty(
        {B, M, num_heads, Kv},
        query.options().dtype(
            TypeTraits<typename Kernel::output_t>::atScalarType()));
    // NOTE: Should be aligned (by padding) in case M is
    // not a good number for loading during backward
    constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
    logsumexp = at::empty(
        {B,
         num_heads,
         compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
        query.options().dtype(at::ScalarType::Float));
    typename Kernel::Params p;
    p.query_ptr = (scalar_t*)query.data_ptr();
    p.key_ptr = (scalar_t*)key.data_ptr();
    p.value_ptr = (scalar_t*)value.data_ptr();
    p.logsumexp_ptr = compute_logsumexp
        ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
        : nullptr;
    at::Tensor output_accum;
    if (Kernel::kNeedsOutputAccumulatorBuffer) {
      // Scratch buffer for kernels that accumulate in a wider type.
      output_accum = at::empty(
          {B, M, num_heads, Kv},
          query.options().dtype(
              TypeTraits<typename Kernel::output_accum_t>::atScalarType()));
      p.output_accum_ptr =
          (typename Kernel::output_accum_t*)output_accum.data_ptr();
    } else {
      p.output_accum_ptr = nullptr;
    }
    p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
    if (cu_seqlens_q.has_value()) {
      p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
      p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
    }
#define ASSIGN_CHECK_OVERFLOW(A, B)                                            \
  {                                                                            \
    A = B;                                                                     \
    TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
  }
    p.num_heads = num_heads;
    p.head_dim = query.size(3);
    p.head_dim_value = value.size(3);
    p.num_queries = max_seqlen_q;
    p.num_keys = max_seqlen_k;
    p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
    p.causal = causal;
    // Strides are narrowed into the Params struct; guard against overflow.
    ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
    constexpr auto kernel_fn = attention_kernel_batched<Kernel>;
    size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
    if (smem_bytes > 0xc000) {
      // Above the 48 KB static limit we must opt in to large dynamic
      // shared memory, which is only available on cc 7.0+.
      TORCH_INTERNAL_ASSERT(
          computeCapability >= 70,
          "This kernel requires too much shared memory on this machine!");
      AT_CUDA_CHECK(hipFuncSetAttribute(
          kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes));
    }
    Kernel::check_supported(p);
    // Launch on the current stream (was previously the default stream,
    // which ignored the stream obtained above).
    hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p);
  };
  // Dispatch to the right kernel
  DISPATCH_KERNEL(query, key, value, ([&]() {
                    launchKernel(Kernel{}, computeCapability);
                  }));
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(res, logsumexp);
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor{}, Tensor{});
}
// Stub kernel entry: per the error message below, the real implementation
// is registered from Python before use; reaching this C++ body always
// raises. Parameters are intentionally unused.
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
  TORCH_CHECK(false, "This operator should be overridden in python before use");
  return at::Tensor();
}
} // namespace native
} // namespace at
| 44451fd5fb5c9a4ad146dab898f8a8a0a2cc0716.cu | #include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#ifdef USE_FLASH_ATTENTION
#include <ATen/native/transformers/cuda/flash_attn/fmha_api.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h>
#endif
#include <ATen/native/transformers/cuda/sdp_utils.h>
namespace at {
namespace native {
namespace {
// Selects tile-shape template knobs from the value head dim and invokes FN
// with `kIs64x64` and `kSingleValueIteration` in scope:
//   head dim <= 64  -> 64x64 tiles, single value iteration
//   head dim <= 128 -> 32x128 tiles, single value iteration
//   otherwise       -> 32x128 tiles, multiple value iterations
#define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN)                            \
  {                                                                       \
    if (VALUE_HEAD_DIM <= 64) {                                           \
      constexpr bool kIs64x64 = true;                                     \
      constexpr bool kSingleValueIteration = true;                        \
      FN();                                                               \
    } else {                                                              \
      constexpr bool kIs64x64 = false;                                    \
      if (VALUE_HEAD_DIM <= 128) {                                        \
        constexpr bool kSingleValueIteration = true;                      \
        FN();                                                             \
      } else {                                                            \
        constexpr bool kSingleValueIteration = false;                     \
        FN();                                                             \
      }                                                                   \
    }                                                                     \
  }
// Full dispatch chain for the memory-efficient attention kernel: reads the
// device's compute capability, picks tile sizes (DISPATCH_BLOCKSIZE), the
// scalar type (DISPATCH_TYPES), the architecture tag (DISPATCH_ARCHTAG) and
// finally whether the faster aligned kernel can be used, then invokes FUNC
// with a concrete `Kernel` type alias and `computeCapability` in scope.
#define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC)                              \
  {                                                                           \
    cudaDeviceProp* properties =                                              \
        at::cuda::getDeviceProperties(QUERY.device().index());                \
    const int computeCapability = properties->major * 10 + properties->minor; \
    DISPATCH_BLOCKSIZE(                                                       \
        VALUE.size(-1), ([&]() {                                              \
          static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32;     \
          static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128;       \
          DISPATCH_TYPES(                                                     \
              QUERY, ([&]() {                                                 \
                DISPATCH_ARCHTAG(                                             \
                    computeCapability, ([&]() {                               \
                      using AlignedAK = AttentionKernel<                      \
                          scalar_t,                                           \
                          ArchTag,                                            \
                          true,                                               \
                          kQueriesPerBlock,                                   \
                          kKeysPerBlock,                                      \
                          kSingleValueIteration>;                             \
                      /* Run a more efficient kernel (with `isAligned=True`)  \
                      if memory is correctly aligned*/                        \
                      bool isAligned =                                        \
                          (QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 &&   \
                           KEY.stride(2) % AlignedAK::kAlignmentK == 0 &&     \
                           VALUE.stride(2) % AlignedAK::kAlignmentV == 0);    \
                      /* TODO: Should we warn or log somewhere when we use a  \
                      less efficient kernel due to wrong alignment? */        \
                      DISPATCH_BOOL(isAligned, kIsAligned, ([&]() {           \
                                      using Kernel = AttentionKernel<         \
                                          scalar_t,                           \
                                          ArchTag,                            \
                                          kIsAligned,                         \
                                          kQueriesPerBlock,                   \
                                          kKeysPerBlock,                      \
                                          kSingleValueIteration>;             \
                                      FUNC();                                 \
                                    }))                                       \
                    }))                                                       \
              }));                                                            \
        }));                                                                  \
  }
// Vector width (elements per thread per load/store) for the
// transform_bias_rescale kernels; the host-side alignment checks and the
// D % VEC / DH % VEC preconditions below are keyed to this value.
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
// Fused bias-add + rescale for a packed QKV projection.
//
// Launch shape: one block per (batch, token) pair (blockIdx.x == b * T + t),
// threads striding over the embedding dimension D = NH * DH.
// Reads qkv[b][t][{0,1,2}*D + d] and qkv_bias[{0,1,2}*D + d]; writes the
// unpacked q_k_v[{0,1,2}][b][nh][t][dh]. Only q is additionally scaled by
// inv_sqrt_dim_per_head; k and v just get their bias added.
// The assume_aligned instantiation takes the vectorized path, which
// requires D % TRANSFORM_BIAS_RESCALE_VEC == 0 (checked by the host code
// that selects this instantiation).
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
    // [B, T, 3 * D]
    const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  // warp per DH.
  // so launch B * NH * T warps.
  auto NH = q_k_v.size(2);
  auto T = q_k_v.size(3);
  auto DH = q_k_v.size(4);
  auto t = blockIdx.x % T;
  auto b = blockIdx.x / T;
  auto D = NH * DH;
  if (assume_aligned) {
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    // Each thread processes VEC consecutive channels per iteration.
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      *reinterpret_cast<LoadT*>(&qkv_q) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_k) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_v) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
      // TODO: specialize for float2half2/half2float2?
      // Arithmetic is performed in accscalar_t to limit precision loss for
      // half/bfloat16 inputs, then narrowed back to scalar_t.
      for (auto ii = 0; ii < VEC; ++ii) {
        qkv_q[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[ii]) +
             static_cast<accscalar_t>(qkv_bias_q[ii])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[ii]) +
             static_cast<accscalar_t>(qkv_bias_k[ii])));
        qkv_v[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[ii]) +
             static_cast<accscalar_t>(qkv_bias_v[ii])));
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Same as above, but we can't vectorize memory access.
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      scalar_t qkv_q = qkv[b][t][d + 0 * D];
      scalar_t qkv_k = qkv[b][t][d + 1 * D];
      scalar_t qkv_v = qkv[b][t][d + 2 * D];
      qkv_q = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_q) +
           static_cast<accscalar_t>(qkv_bias_q)) *
          static_cast<accscalar_t>(inv_sqrt_dim_per_head));
      qkv_k = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_k) +
           static_cast<accscalar_t>(qkv_bias_k)));
      qkv_v = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_v) +
           static_cast<accscalar_t>(qkv_bias_v)));
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
// Variant of transform_bias_rescale_qkv_kernel for nested (jagged) input:
// `qkv` is the flat NestedTensor buffer, `offsets[b]` is the start of batch
// b inside that buffer, and `input_sizes[b]` is the number of valid
// elements (seq_len_b * 3 * D) for batch b. Positions past a batch's valid
// length are written to the dense [3, B, NH, T, DH] output as zeros.
// Launch shape matches the dense kernel: blockIdx.x == b * T + t, threads
// stride over D.
//
// FIX: in the partially-valid-vector branch below, the per-element bounds
// check previously compared the *absolute* buffer offset
// (offset_for_batch + first_item_offset + ii) against the batch-relative
// size sizes_i[0]. first_item_in_bounds / entire_vec_in_bounds use
// batch-relative offsets, and so must the tail check; the old comparison
// wrongly zero-filled valid elements for every batch after the first.
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
    // [B, T, 3 * D], but it's a NestedTensor buffer
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    const int* offsets,
    const int* input_sizes,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  // warp per DH.
  // so launch B * NH * T warps.
  const auto NH = q_k_v.size(2);
  const auto T = q_k_v.size(3);
  const auto DH = q_k_v.size(4);
  const auto t = blockIdx.x % T;
  const auto b = blockIdx.x / T;
  const auto D = NH * DH;
  const auto _3D = 3 * D;
  const auto offset_for_batch = offsets[b];
  const auto input_dim = 1;
  const auto* sizes_i = input_sizes + b * input_dim;
  if (assume_aligned) {
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      // These offsets are relative to the start of batch b, matching the
      // batch-relative valid-size sizes_i[0].
      const auto first_item_offset = t * _3D + d;
      const auto last_item_offset = first_item_offset + VEC - 1;
      const bool first_item_in_bounds = first_item_offset < sizes_i[0];
      const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      if (entire_vec_in_bounds) {
        const auto offset = offset_for_batch + first_item_offset;
        *reinterpret_cast<LoadT*>(&qkv_q) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
        *reinterpret_cast<LoadT*>(&qkv_k) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
        *reinterpret_cast<LoadT*>(&qkv_v) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
        // TODO: specialize for float2half2/half2float2?
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_q[ii]) +
               static_cast<accscalar_t>(qkv_bias_q[ii])) *
              static_cast<accscalar_t>(inv_sqrt_dim_per_head));
          qkv_k[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_k[ii]) +
               static_cast<accscalar_t>(qkv_bias_k[ii])));
          qkv_v[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_v[ii]) +
               static_cast<accscalar_t>(qkv_bias_v[ii])));
        }
      } else if (first_item_in_bounds) {
        // Vector straddles the end of the batch's valid region: handle
        // element 0, then the tail one element at a time, zero-filling
        // everything past the valid length.
        const auto offset = offset_for_batch + first_item_offset;
        qkv_q[0] = qkv[offset + 0 * D];
        qkv_k[0] = qkv[offset + 1 * D];
        qkv_v[0] = qkv[offset + 2 * D];
        qkv_q[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[0]) +
             static_cast<accscalar_t>(qkv_bias_q[0])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[0]) +
             static_cast<accscalar_t>(qkv_bias_k[0])));
        qkv_v[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[0]) +
             static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
        for (auto ii = 1; ii < VEC; ++ii) {
          const auto loop_offset = offset + ii;
          // Batch-relative bound, consistent with first_item_in_bounds
          // above (the absolute `loop_offset` is only used for indexing).
          if (first_item_offset + ii < sizes_i[0]) {
            qkv_q[ii] = qkv[loop_offset + 0 * D];
            qkv_k[ii] = qkv[loop_offset + 1 * D];
            qkv_v[ii] = qkv[loop_offset + 2 * D];
            qkv_q[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_q[ii]) +
                 static_cast<accscalar_t>(qkv_bias_q[ii])) *
                static_cast<accscalar_t>(inv_sqrt_dim_per_head));
            qkv_k[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_k[ii]) +
                 static_cast<accscalar_t>(qkv_bias_k[ii])));
            qkv_v[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_v[ii]) +
                 static_cast<accscalar_t>(qkv_bias_v[ii])));
          } else {
            qkv_q[ii] = 0;
            qkv_k[ii] = 0;
            qkv_v[ii] = 0;
          }
        }
      } else {
        // Entire vector lies in the padding region.
#pragma unroll
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = 0;
          qkv_k[ii] = 0;
          qkv_v[ii] = 0;
        }
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Scalar fallback for unaligned data.
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      const auto item_offset = t * _3D + d;
      const bool in_bounds = item_offset < sizes_i[0];
      scalar_t qkv_q, qkv_k, qkv_v;
      if (in_bounds) {
        const auto qkv_offset = offset_for_batch + item_offset;
        qkv_q = qkv[qkv_offset + 0 * D];
        qkv_k = qkv[qkv_offset + 1 * D];
        qkv_v = qkv[qkv_offset + 2 * D];
        qkv_q = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q) +
             static_cast<accscalar_t>(qkv_bias_q)) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k) +
             static_cast<accscalar_t>(qkv_bias_k)));
        qkv_v = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v) +
             static_cast<accscalar_t>(qkv_bias_v)));
      } else {
        qkv_q = 0;
        qkv_k = 0;
        qkv_v = 0;
      }
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
  // Fold the first two size columns into one: take the length-1 slices at
  // columns 0 and 1 along dim 1 and multiply them elementwise.
  auto dim1_sizes = at::native::narrow_symint(sizes, 1, 0, 1);
  auto dim2_sizes = at::native::narrow_symint(sizes, 1, 1, 1);
  return (dim1_sizes * dim2_sizes).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
//
// Host launcher: accepts either a dense [B, T, 3*D] qkv tensor or a
// NestedTensor (jagged sequence lengths), and produces a dense
// [3, B, num_head, T, dim_per_head] tensor, returned as the (q, k, v)
// tuple of views. Picks the vectorized kernel when D and the data pointers
// are suitably aligned, and the padding kernel for nested input.
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
    const Tensor& qkv,
    const Tensor& qkv_bias,
    const int64_t num_head) {
  auto B = qkv.is_nested()
      ? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0)
      : qkv.size(0);
  // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
  // this too
  auto T = qkv.is_nested()
      ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
      : qkv.size(1);
  if (qkv.is_nested()) {
    // Don't mess with non-nested case for now since it's not set up to fiddle
    // with mask size.
    // Round T up to next multiple of 8 so as to be able to utilize Tensor
    // cores. Otherwise, sometimes with padding, *no* row will have the maximum
    // sequence length and so we'll have a non-divisible-by-8 dimension even if
    // the model author chose a multiple of 8.
    T = T + (8 - (T % 8)) % 8;
  }
  auto _3D = qkv_bias.size(0);
  auto D = _3D / 3;
  TORCH_CHECK(D % num_head == 0);
  const auto dim_per_head = D / num_head;
  // Dense output buffer; padding positions are zero-filled by the kernel.
  auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned)                                        \
  transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \
      <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(          \
          qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(),         \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),       \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned)                         \
  transform_bias_rescale_qkv_add_padding_kernel<                        \
      scalar_t,                                                         \
      accscalar_t,                                                      \
      assume_aligned>                                                   \
      <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(       \
          nt_qkv_buffer                                                 \
              .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),     \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
          offsets_ptr,                                                  \
          sizes_ptr,                                                    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),    \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      qkv.scalar_type(),
      "transform_bias_rescale_qkv",
      [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        // One block per (batch, token); threads cover D in VEC-wide chunks.
        auto threads = std::max(
            std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
        auto blocks = B * T;
        // The vectorized kernel needs both a divisible head dim and an
        // aligned bias pointer.
        const bool aligned =
            ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
            ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
              TRANSFORM_BIAS_RESCALE_VEC) == 0);
        if (aligned) {
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
              D % TRANSFORM_BIAS_RESCALE_VEC == 0,
              "D = num_heads * dim_per_head, so we should have dim_per_head % "
              "TRANSFORM_BIAS_RESCALE_VEC == 0 => "
              "D % TRANSFORM_BIAS_RESCALE_VEC == 0");
        }
        if (qkv.is_nested()) {
          // Build a single device buffer holding [offsets..., sizes...] so
          // one H2D copy serves both kernel arguments.
          auto* nt_qkv = get_nested_tensor_impl(qkv);
          const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
          auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor());
          auto offsets =
              NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
          at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
              .copy_(sizes.reshape({-1}));
          auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
          const auto offsets_ptr = metadata.data_ptr<int>();
          const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
          const auto input_dim = sizes.sizes()[1];
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
          if (aligned &&
              ((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
                TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
            CALL_ADD_PADDING_KERNEL(true);
          } else {
            CALL_ADD_PADDING_KERNEL(false);
          }
        } else if (aligned) {
          CALL_KERNEL(true);
        } else {
          CALL_KERNEL(false);
        }
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
  // Split [3*B, ...] back into three B-sized chunks: q, k, v.
  auto q_k_v_s =
      at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
  return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> flash_attention_helper_dense_unpacked(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    double dropout_p,
    bool need_atten_weights,
    bool is_causal) {
  // q/k/v arrive as (Batch, Num_heads, Seq_len, Dim_per_head); flash
  // attention consumes a packed (nnz, Num_heads, Dim_per_head) layout.
  const int64_t batch_size = query.size(0);
  const int64_t num_heads = query.size(1);
  const int64_t max_seqlen_batch_q = query.size(2);
  const int64_t head_dim = query.size(3);
  const int64_t max_seqlen_batch_k = key.size(2);
  const int64_t max_seqlen_batch_v = value.size(2);
  TORCH_CHECK(
      max_seqlen_batch_k == max_seqlen_batch_v,
      "Key and Value must have the same sequence length");
  // Move the head dim behind the sequence dim.
  Tensor q_t = query.transpose(1, 2);
  Tensor k_t = key.transpose(1, 2);
  Tensor v_t = value.transpose(1, 2);
  // Dense input means uniform sequence lengths, so the cumulative sequence
  // length tensors are simple arithmetic progressions.
  const auto cu_opts = TensorOptions().device(at::kCUDA).dtype(at::kInt);
  Tensor cumulative_sequence_length_q = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_q,
      max_seqlen_batch_q,
      cu_opts);
  Tensor cumulative_sequence_length_k = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_k,
      max_seqlen_batch_k,
      cu_opts);
  // Collapse (batch, seq) into one "nnz" dim.
  const int64_t Nnz_q = batch_size * max_seqlen_batch_q;
  const int64_t Nnz_kv = batch_size * max_seqlen_batch_k;
  // For the standard MHA these reshapes are actually views.
  Tensor attention = at::_flash_scaled_dot_product_attention(
      q_t.reshape({Nnz_q, num_heads, head_dim}),
      k_t.reshape({Nnz_kv, num_heads, head_dim}),
      v_t.reshape({Nnz_kv, num_heads, head_dim}),
      cumulative_sequence_length_q,
      cumulative_sequence_length_k,
      max_seqlen_batch_q,
      max_seqlen_batch_k,
      dropout_p,
      is_causal);
  // Unpack nnz back into (Batch, Num_heads, Seq_len, Dim_per_head).
  attention =
      attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim})
          .transpose(1, 2);
  return std::tuple<Tensor, Tensor>(attention, Tensor());
}
std::tuple<Tensor, Tensor> mem_eff_helper(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value){
  // The efficient-attention kernel consumes (Batch, Seq_len, Num_heads,
  // Dim_per_head); inputs arrive with heads and sequence swapped, so we
  // transpose dims 1 and 2 around the call and transpose the result back.
  // No attention weights are produced, hence the empty second tuple slot.
  auto forward_out = at::_efficient_attention_forward(
      query.transpose(1, 2),
      key.transpose(1, 2),
      value.transpose(1, 2),
      c10::nullopt,
      c10::nullopt,
      c10::nullopt,
      /*compute_logsumexp=*/false,
      /*causal=*/false);
  Tensor attention = std::get<0>(forward_out).transpose(1, 2);
  return std::make_tuple(attention, Tensor());
}
std::tuple<Tensor, Tensor> _scaled_dot_product_attention_forward_cuda(
    const Tensor& query_, const Tensor& key, const Tensor& value,
    const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal) {
  // Ask the backend selector which fused implementation can serve this
  // input configuration, then forward to the matching helper.
  sdp::sdp_params params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
  const auto backend = select_sdp_backend(params);
  if (backend == sdp::SDPBackend::flash_attention) {
    return flash_attention_helper_dense_unpacked(
        query_, key, value, dropout_p, need_attn_weights, is_causal);
  }
  if (backend == sdp::SDPBackend::efficient_attention) {
    return mem_eff_helper(query_, key, value);
  }
  if (backend == sdp::SDPBackend::math) {
    // Composite (unfused) fallback implemented with regular ATen ops.
    return at::_scaled_dot_product_attention_math(
        query_, key, value, attn_mask_, dropout_p, need_attn_weights, is_causal);
  }
  TORCH_CHECK(false, "No viable backend for scaled_dot_product_attention was found.");
  return std::make_tuple(Tensor(), Tensor());
}
std::tuple<Tensor, Tensor> flash_scaled_dot_product_attention(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const Tensor& cumulative_sequence_length_q,
    const Tensor& cumulative_sequence_length_k,
    const int64_t max_seqlen_batch_q,
    const int64_t max_seqlen_batch_k,
    double dropout_p,
    bool need_attn_weights,
    bool is_causal) {
#if defined(USE_FLASH_ATTENTION)
  // Standard scaled dot-product attention scaling: head_dim^(-1/2).
  const auto softmax_scale = std::pow(query.size(-1), -0.5);
  std::vector<Tensor> results = fmha::mha_fwd(
      query,
      key,
      value,
      cumulative_sequence_length_q,
      cumulative_sequence_length_k,
      max_seqlen_batch_q,
      max_seqlen_batch_k,
      dropout_p,
      softmax_scale,
      false,
      is_causal,
      need_attn_weights,
      c10::nullopt);
  // results[0] is the attention output; results[2] is returned as the
  // attention-weights slot only when the caller asked for it.
  if (need_attn_weights) {
    return std::make_tuple(results[0], results[2]);
  }
  return std::make_tuple(results[0], Tensor{});
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor{}, Tensor{});
}
// Memory-efficient attention forward (xformers-style fused kernel).
// Returns (output [b, seqlen, num_heads, Kv], logsumexp [b, num_heads, M'])
// where the logsumexp tensor has a zero-sized last dim unless
// compute_logsumexp is set.
//
// FIX: the kernel is now launched on the current CUDA stream. `stream` was
// previously computed but never used, so the kernel ran on the legacy
// default stream and broke stream ordering with the rest of PyTorch.
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
    const at::Tensor& query, // [b, seqlen, num_heads, K]
    const at::Tensor& key, // [b, seqlen, num_heads, K]
    const at::Tensor& value, // [b, seqlen, num_heads, Kv]
    // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
    // position of the first query token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_q,
    // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
    // position of the first key token for batch $b
    const c10::optional<at::Tensor>& cu_seqlens_k,
    // (Mode 1MHK only) Maximum sequence length across batches
    const c10::optional<int64_t> max_seqlen_q_,
    bool compute_logsumexp,
    bool causal) {
#if defined(USE_FLASH_ATTENTION)
  // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
  // machine that is >= 5.0. In practice, this is not a problem but since
  // this would avoid runtime architecture checks, we should look into it
  TORCH_CHECK(query.dim() == 4);
  TORCH_CHECK(key.dim() == 4);
  TORCH_CHECK(value.dim() == 4);
  // Batch sizes
  TORCH_CHECK(query.size(0) == key.size(0));
  TORCH_CHECK(query.size(0) == value.size(0));
  // Sequence length
  TORCH_CHECK(key.size(1) == value.size(1));
  // Num heads
  TORCH_CHECK(query.size(2) == key.size(2));
  TORCH_CHECK(query.size(2) == value.size(2));
  // Embedding per head
  TORCH_CHECK(query.size(3) == key.size(3));
  int64_t max_seqlen_q = 0, max_seqlen_k = 0;
  TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
  if (cu_seqlens_q.has_value()) {
    // Variable-sequence-length ("1MHK") mode: lengths come from the
    // cumulative-sequence tensors and the batch dim must be folded to 1.
    TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
    TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
    TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
    TORCH_CHECK(max_seqlen_q_.has_value());
    max_seqlen_q = *max_seqlen_q_;
    max_seqlen_k = 0; // Will be set inside the kernel
  } else {
    max_seqlen_q = query.size(1);
    max_seqlen_k = key.size(1);
  }
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
  at::cuda::CUDAGuard device_guard(query.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  int64_t B = query.size(0);
  int64_t M = query.size(1);
  int64_t num_heads = query.size(-2);
  int64_t Kv = value.size(-1);
  at::Tensor res;
  at::Tensor logsumexp;
  // Instantiated once per concrete Kernel type by DISPATCH_KERNEL below.
  auto launchKernel = [&](auto _k, int computeCapability) {
    using Kernel = decltype(_k);
    using scalar_t = typename Kernel::scalar_t;
    (void)_k;
    res = at::empty(
        {B, M, num_heads, Kv},
        query.options().dtype(
            TypeTraits<typename Kernel::output_t>::atScalarType()));
    // NOTE: Should be aligned (by padding) in case M is
    // not a good number for loading during backward
    constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
    logsumexp = at::empty(
        {B,
         num_heads,
         compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
        query.options().dtype(at::ScalarType::Float));
    typename Kernel::Params p;
    p.query_ptr = (scalar_t*)query.data_ptr();
    p.key_ptr = (scalar_t*)key.data_ptr();
    p.value_ptr = (scalar_t*)value.data_ptr();
    p.logsumexp_ptr = compute_logsumexp
        ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
        : nullptr;
    at::Tensor output_accum;
    if (Kernel::kNeedsOutputAccumulatorBuffer) {
      // Scratch buffer for kernels that accumulate in a wider type.
      output_accum = at::empty(
          {B, M, num_heads, Kv},
          query.options().dtype(
              TypeTraits<typename Kernel::output_accum_t>::atScalarType()));
      p.output_accum_ptr =
          (typename Kernel::output_accum_t*)output_accum.data_ptr();
    } else {
      p.output_accum_ptr = nullptr;
    }
    p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
    if (cu_seqlens_q.has_value()) {
      p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
      p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
    }
#define ASSIGN_CHECK_OVERFLOW(A, B)                                            \
  {                                                                            \
    A = B;                                                                     \
    TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
  }
    p.num_heads = num_heads;
    p.head_dim = query.size(3);
    p.head_dim_value = value.size(3);
    p.num_queries = max_seqlen_q;
    p.num_keys = max_seqlen_k;
    p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
    p.causal = causal;
    // Strides are narrowed into the Params struct; guard against overflow.
    ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
    constexpr auto kernel_fn = attention_kernel_batched<Kernel>;
    size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
    if (smem_bytes > 0xc000) {
      // Above the 48 KB static limit we must opt in to large dynamic
      // shared memory, which is only available on cc 7.0+.
      TORCH_INTERNAL_ASSERT(
          computeCapability >= 70,
          "This kernel requires too much shared memory on this machine!");
      AT_CUDA_CHECK(cudaFuncSetAttribute(
          kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes));
    }
    Kernel::check_supported(p);
    // Launch on the current stream (was previously the legacy default
    // stream, which ignored the stream obtained above).
    kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
  };
  // Dispatch to the right kernel
  DISPATCH_KERNEL(query, key, value, ([&]() {
                    launchKernel(Kernel{}, computeCapability);
                  }));
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(res, logsumexp);
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor{}, Tensor{});
}
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
  // Placeholder: the actual implementation is registered as a Python override.
  // Reaching this C++ stub means the override was never installed, so fail loudly.
  TORCH_CHECK(false, "This operator should be overridden in python before use");
  return at::Tensor{};
}
} // namespace native
} // namespace at
|
9d43ebcba5b73c5f2050dbcb3e559ba5b61dc341.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is functional in host
    and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables hardware-accelerated
numeric conversion on x86-64 CPUs support F16C extensions. In device code, all available
hardware is used to implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported layouts. It simplifies
allocation and management of host- and device- memory allocations.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
    This template function initializes elements of a tensor to a random Gaussian distribution. It
uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Launches a CUTLASS half-precision GEMM computing C = alpha * A * B + beta * C.
///
/// All matrices are column-major ("NN" layout): A is M-by-K with leading
/// dimension lda, B is K-by-N (ldb), C is M-by-N (ldc); the pointers refer to
/// device memory. Returns hipSuccess, or hipErrorUnknown if CUTLASS reports a
/// failure.
hipError_t cutlass_hgemm_nn(
  int M,
  int N,
  int K,
  cutlass::half_t alpha,
  cutlass::half_t const *A,
  int lda,
  cutlass::half_t const *B,
  int ldb,
  cutlass::half_t beta,
  cutlass::half_t *C,
  int ldc) {

  // Column-major FP16 GEMM; remaining template parameters use CUTLASS defaults.
  using HalfGemm = cutlass::gemm::device::Gemm<
      cutlass::half_t, cutlass::layout::ColumnMajor,  // ElementA, LayoutA
      cutlass::half_t, cutlass::layout::ColumnMajor,  // ElementB, LayoutB
      cutlass::half_t, cutlass::layout::ColumnMajor   // ElementOutput, LayoutOutput
  >;

  HalfGemm gemm_op;

  // C serves both as the accumulator source (beta term) and the destination.
  cutlass::Status status = gemm_op({
    {M, N, K},
    {A, lda},
    {B, ldb},
    {C, ldc},
    {C, ldc},
    {alpha, beta}
  });

  return (status == cutlass::Status::kSuccess) ? hipSuccess : hipErrorUnknown;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a half-precision
/// CUTLASS GEMM kernel, then verify the device result against a host-side
/// reference GEMM.
///
/// Computes C = alpha * A * B + beta * C with A (M-by-K), B (K-by-N) and
/// C (M-by-N), all column-major cutlass::half_t. Returns hipSuccess when the
/// device result matches the host reference; otherwise the mismatching
/// matrices are written to a CSV file and hipErrorUnknown is returned.
hipError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
  hipError_t result;

  //
  // Construct cutlass::HostTensor<> using the half-precision host-side type.
  //
  // cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2
  // tensors in column-major layout. Explicit synchronization methods are offered to copy the
  // tensor to the device or to the host.
  //

  // M-by-K matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));

  // K-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));

  // M-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N));

  // M-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N));

  //
  // Initialize matrices with small, random integers.
  //

  // Arbitrary RNG seed value. Hard-coded for deterministic results.
  uint64_t seed = 2080;

  // Gaussian random distribution
  cutlass::half_t mean = 0.0_hf;
  cutlass::half_t stddev = 5.0_hf;

  // Specify the number of bits right of the binary decimal that are permitted
  // to be non-zero. A value of "0" here truncates random values to integers
  int bits_less_than_one = 0;

  // Distinct seeds (seed, seed * 2019, seed * 1993) give A, B and C
  // independent pseudo-random contents.
  cutlass::reference::device::TensorFillRandomGaussian(
    A.device_view(),
    seed,
    mean,
    stddev,
    bits_less_than_one
  );

  cutlass::reference::device::TensorFillRandomGaussian(
    B.device_view(),
    seed * 2019,
    mean,
    stddev,
    bits_less_than_one
  );

  cutlass::reference::device::TensorFillRandomGaussian(
    C_cutlass.device_view(),
    seed * 1993,
    mean,
    stddev,
    bits_less_than_one
  );

  // Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
  cutlass::device_memory::copy_device_to_device(
    C_reference.device_data(),
    C_cutlass.device_data(),
    C_cutlass.capacity());

  // Copy the device-side view into host memory; the host reference GEMM
  // below accumulates into this pristine copy of C.
  C_reference.sync_host();

  //
  // Launch the CUTLASS GEMM kernel
  //

  result = cutlass_hgemm_nn(
    M,
    N,
    K,
    alpha,
    A.device_data(),
    A.stride(0),
    B.device_data(),
    B.stride(0),
    beta,
    C_cutlass.device_data(),
    C_cutlass.stride(0)
  );

  if (result != hipSuccess) {
    return result;
  }

  //
  // Verify the result using a host-side reference
  //

  // A and B were initialized using device-side procedures. The intent of this example is to
  // use the host-side reference GEMM, so we must perform a device-to-host copy.
  A.sync_host();
  B.sync_host();

  // Copy CUTLASS's GEMM results into host memory.
  C_cutlass.sync_host();

  // Compute the reference result using the host-side GEMM reference implementation.
  cutlass::reference::host::Gemm<
    cutlass::half_t,                           // ElementA
    cutlass::layout::ColumnMajor,              // LayoutA
    cutlass::half_t,                           // ElementB
    cutlass::layout::ColumnMajor,              // LayoutB
    cutlass::half_t,                           // ElementOutput
    cutlass::layout::ColumnMajor,              // LayoutOutput
    cutlass::half_t,                           // internal accumulation type
    cutlass::half_t                            // scalar type for alpha/beta
  > gemm_ref;

  gemm_ref(
    {M, N, K},              // problem size (type: cutlass::gemm::GemmCoord)
    alpha,                  // alpha (type: cutlass::half_t)
    A.host_ref(),           // A (type: TensorRef<half_t, ColumnMajor>)
    B.host_ref(),           // B (type: TensorRef<half_t, ColumnMajor>)
    beta,                   // beta (type: cutlass::half_t)
    C_reference.host_ref()  // C (type: TensorRef<half_t, ColumnMajor>)
  );

  // Compare reference to computed results. NOTE(review): the comparison is
  // bit-exact; this presumably relies on both paths using identical
  // half-precision accumulation — confirm before reusing with other configs.
  if (!cutlass::reference::host::TensorEquals(
    C_reference.host_view(),
    C_cutlass.host_view())) {

    char const *filename = "errors_01_cutlass_utilities.csv";

    std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl;

    //
    // On error, print C_cutlass and C_reference to std::cerr.
    //
    // Note, these are matrices of half-precision elements stored in host memory as
    // arrays of type cutlass::half_t.
    //

    std::ofstream file(filename);

    // Result of CUTLASS GEMM kernel
    file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;

    // Result of reference computation
    file << "\n\nReference =\n" << C_reference.host_view() << std::endl;

    // Return error code.
    return hipErrorUnknown;
  }

  // Passed error check
  return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
//   01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {

  //
  // Half-precision GEMM requires compute capability 5.3 or greater, so query
  // the device before doing anything else.
  //

  hipDeviceProp_t prop;
  hipError_t result = hipGetDeviceProperties(&prop, 0);

  if (result != hipSuccess) {
    std::cerr << "Failed to query device properties with error " << hipGetErrorString(result) << std::endl;
    return -1;
  }

  if (prop.major < 5 || (prop.major == 5 && prop.minor < 3)) {
    std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n";
    std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl;
    return -1;
  }

  //
  // Parse the command line: GEMM dimensions <M> <N> <K> (default 128 each).
  //

  int problem[3] = { 128, 128, 128 };
  for (int i = 1; i < argc && i < 4; ++i) {
    std::stringstream parser(arg[i]);
    parser >> problem[i - 1];
  }

  // Scalars <alpha> <beta>, lexically cast to cutlass::half_t. Values outside
  // the range of IEEE FP16 overflow to infinity or underflow to zero.
  cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf };
  for (int i = 4; i < argc && i < 6; ++i) {
    std::stringstream parser(arg[i]);
    parser >> scalars[i - 4];
  }

  //
  // Run the CUTLASS GEMM test.
  //

  result = TestCutlassGemm(
    problem[0],  // GEMM M dimension
    problem[1],  // GEMM N dimension
    problem[2],  // GEMM K dimension
    scalars[0],  // alpha
    scalars[1]   // beta
  );

  if (result == hipSuccess) {
    std::cout << "Passed." << std::endl;
  }

  // Exit.
  return result == hipSuccess ? 0 : -1;
}
| 9d43ebcba5b73c5f2050dbcb3e559ba5b61dc341.cu | /***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is functional in host
    and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables hardware-accelerated
numeric conversion on x86-64 CPUs support F16C extensions. In device code, all available
hardware is used to implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported layouts. It simplifies
allocation and management of host- and device- memory allocations.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
    This template function initializes elements of a tensor to a random Gaussian distribution. It
uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Launches a CUTLASS half-precision GEMM computing C = alpha * A * B + beta * C.
///
/// All matrices are column-major ("NN" layout): A is M-by-K with leading
/// dimension lda, B is K-by-N (ldb), C is M-by-N (ldc); the pointers refer to
/// device memory. Returns cudaSuccess, or cudaErrorUnknown if CUTLASS reports
/// a failure.
cudaError_t cutlass_hgemm_nn(
  int M,
  int N,
  int K,
  cutlass::half_t alpha,
  cutlass::half_t const *A,
  int lda,
  cutlass::half_t const *B,
  int ldb,
  cutlass::half_t beta,
  cutlass::half_t *C,
  int ldc) {

  // Column-major FP16 GEMM; remaining template parameters use CUTLASS defaults.
  using HalfGemm = cutlass::gemm::device::Gemm<
      cutlass::half_t, cutlass::layout::ColumnMajor,  // ElementA, LayoutA
      cutlass::half_t, cutlass::layout::ColumnMajor,  // ElementB, LayoutB
      cutlass::half_t, cutlass::layout::ColumnMajor   // ElementOutput, LayoutOutput
  >;

  HalfGemm gemm_op;

  // C serves both as the accumulator source (beta term) and the destination.
  cutlass::Status status = gemm_op({
    {M, N, K},
    {A, lda},
    {B, ldb},
    {C, ldc},
    {C, ldc},
    {alpha, beta}
  });

  return (status == cutlass::Status::kSuccess) ? cudaSuccess : cudaErrorUnknown;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a half-precision
/// CUTLASS GEMM kernel, then verify the device result against a host-side
/// reference GEMM.
///
/// Computes C = alpha * A * B + beta * C with A (M-by-K), B (K-by-N) and
/// C (M-by-N), all column-major cutlass::half_t. Returns cudaSuccess when the
/// device result matches the host reference; otherwise the mismatching
/// matrices are written to a CSV file and cudaErrorUnknown is returned.
cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
  cudaError_t result;

  //
  // Construct cutlass::HostTensor<> using the half-precision host-side type.
  //
  // cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2
  // tensors in column-major layout. Explicit synchronization methods are offered to copy the
  // tensor to the device or to the host.
  //

  // M-by-K matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));

  // K-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));

  // M-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N));

  // M-by-N matrix of cutlass::half_t
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N));

  //
  // Initialize matrices with small, random integers.
  //

  // Arbitrary RNG seed value. Hard-coded for deterministic results.
  uint64_t seed = 2080;

  // Gaussian random distribution
  cutlass::half_t mean = 0.0_hf;
  cutlass::half_t stddev = 5.0_hf;

  // Specify the number of bits right of the binary decimal that are permitted
  // to be non-zero. A value of "0" here truncates random values to integers
  int bits_less_than_one = 0;

  // Distinct seeds (seed, seed * 2019, seed * 1993) give A, B and C
  // independent pseudo-random contents.
  cutlass::reference::device::TensorFillRandomGaussian(
    A.device_view(),
    seed,
    mean,
    stddev,
    bits_less_than_one
  );

  cutlass::reference::device::TensorFillRandomGaussian(
    B.device_view(),
    seed * 2019,
    mean,
    stddev,
    bits_less_than_one
  );

  cutlass::reference::device::TensorFillRandomGaussian(
    C_cutlass.device_view(),
    seed * 1993,
    mean,
    stddev,
    bits_less_than_one
  );

  // Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
  cutlass::device_memory::copy_device_to_device(
    C_reference.device_data(),
    C_cutlass.device_data(),
    C_cutlass.capacity());

  // Copy the device-side view into host memory; the host reference GEMM
  // below accumulates into this pristine copy of C.
  C_reference.sync_host();

  //
  // Launch the CUTLASS GEMM kernel
  //

  result = cutlass_hgemm_nn(
    M,
    N,
    K,
    alpha,
    A.device_data(),
    A.stride(0),
    B.device_data(),
    B.stride(0),
    beta,
    C_cutlass.device_data(),
    C_cutlass.stride(0)
  );

  if (result != cudaSuccess) {
    return result;
  }

  //
  // Verify the result using a host-side reference
  //

  // A and B were initialized using device-side procedures. The intent of this example is to
  // use the host-side reference GEMM, so we must perform a device-to-host copy.
  A.sync_host();
  B.sync_host();

  // Copy CUTLASS's GEMM results into host memory.
  C_cutlass.sync_host();

  // Compute the reference result using the host-side GEMM reference implementation.
  cutlass::reference::host::Gemm<
    cutlass::half_t,                           // ElementA
    cutlass::layout::ColumnMajor,              // LayoutA
    cutlass::half_t,                           // ElementB
    cutlass::layout::ColumnMajor,              // LayoutB
    cutlass::half_t,                           // ElementOutput
    cutlass::layout::ColumnMajor,              // LayoutOutput
    cutlass::half_t,                           // internal accumulation type
    cutlass::half_t                            // scalar type for alpha/beta
  > gemm_ref;

  gemm_ref(
    {M, N, K},              // problem size (type: cutlass::gemm::GemmCoord)
    alpha,                  // alpha (type: cutlass::half_t)
    A.host_ref(),           // A (type: TensorRef<half_t, ColumnMajor>)
    B.host_ref(),           // B (type: TensorRef<half_t, ColumnMajor>)
    beta,                   // beta (type: cutlass::half_t)
    C_reference.host_ref()  // C (type: TensorRef<half_t, ColumnMajor>)
  );

  // Compare reference to computed results. NOTE(review): the comparison is
  // bit-exact; this presumably relies on both paths using identical
  // half-precision accumulation — confirm before reusing with other configs.
  if (!cutlass::reference::host::TensorEquals(
    C_reference.host_view(),
    C_cutlass.host_view())) {

    char const *filename = "errors_01_cutlass_utilities.csv";

    std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl;

    //
    // On error, print C_cutlass and C_reference to std::cerr.
    //
    // Note, these are matrices of half-precision elements stored in host memory as
    // arrays of type cutlass::half_t.
    //

    std::ofstream file(filename);

    // Result of CUTLASS GEMM kernel
    file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;

    // Result of reference computation
    file << "\n\nReference =\n" << C_reference.host_view() << std::endl;

    // Return error code.
    return cudaErrorUnknown;
  }

  // Passed error check
  return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
//   01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {

  //
  // Half-precision GEMM requires compute capability 5.3 or greater, so query
  // the device before doing anything else.
  //

  cudaDeviceProp prop;
  cudaError_t result = cudaGetDeviceProperties(&prop, 0);

  if (result != cudaSuccess) {
    std::cerr << "Failed to query device properties with error " << cudaGetErrorString(result) << std::endl;
    return -1;
  }

  if (prop.major < 5 || (prop.major == 5 && prop.minor < 3)) {
    std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n";
    std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl;
    return -1;
  }

  //
  // Parse the command line: GEMM dimensions <M> <N> <K> (default 128 each).
  //

  int problem[3] = { 128, 128, 128 };
  for (int i = 1; i < argc && i < 4; ++i) {
    std::stringstream parser(arg[i]);
    parser >> problem[i - 1];
  }

  // Scalars <alpha> <beta>, lexically cast to cutlass::half_t. Values outside
  // the range of IEEE FP16 overflow to infinity or underflow to zero.
  cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf };
  for (int i = 4; i < argc && i < 6; ++i) {
    std::stringstream parser(arg[i]);
    parser >> scalars[i - 4];
  }

  //
  // Run the CUTLASS GEMM test.
  //

  result = TestCutlassGemm(
    problem[0],  // GEMM M dimension
    problem[1],  // GEMM N dimension
    problem[2],  // GEMM K dimension
    scalars[0],  // alpha
    scalars[1]   // beta
  );

  if (result == cudaSuccess) {
    std::cout << "Passed." << std::endl;
  }

  // Exit.
  return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
168f176cc6f205daeb76bfd8cc30459d03164336.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_xder1_kernel;
int xdim0_xder1_kernel_h = -1;
int ydim0_xder1_kernel_h = -1;
__constant__ int xdim1_xder1_kernel;
int xdim1_xder1_kernel_h = -1;
int ydim1_xder1_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
// user function
// 4th-order central first derivative in x:
//   out = (f(-2) - f(+2) + 8 * (f(+1) - f(-1))) / (12 * dx)
// `dx` is a global defined elsewhere (presumably the grid spacing — TODO
// confirm against the OPS-generated constants).
__device__ void xder1_kernel_gpu(const double *inp, double *out) {
  const double inv_12dx = 1 / (12.00 * dx);
  const double stencil = inp[OPS_ACC0(-2)] - inp[OPS_ACC0(2)] +
                         8.0 * (inp[OPS_ACC0(1)] - inp[OPS_ACC0(-1)]);
  out[OPS_ACC1(0)] = stencil * inv_12dx;
}
#undef OPS_ACC0
#undef OPS_ACC1
// 1D wrapper kernel: each thread applies xder1_kernel_gpu to one grid point.
// Launched with a 1D grid sized to cover size0 points (guard handles the tail).
__global__ void ops_xder1_kernel(const double *__restrict arg0,
                                 double *__restrict arg1, int size0) {
  const int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx_x >= size0)
    return;
  // Unit stride in x for both dats (the generated stride factors are 1 * 1).
  xder1_kernel_gpu(arg0 + idx_x, arg1 + idx_x);
}
// host stub function
//
// Auto-generated OPS host wrapper for xder1_kernel. Computes the local
// iteration range (handling the MPI sub-block decomposition when OPS_MPI is
// defined), refreshes the __constant__ dat x-dimensions when they change,
// derives base device pointers for both arguments, launches the 1D kernel,
// and updates halo dirty bits plus OPS timing/transfer counters.
void ops_par_loop_xder1_kernel(char const *name, ops_block block, int dim,
                               int *range, ops_arg arg0, ops_arg arg1) {

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[2] = {arg0, arg1};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 2, range, 4))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(4, "xder1_kernel");
    OPS_kernels[4].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[1];
  int end[1];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 1; n++) {
    // Clip the requested global range to this rank's portion of the block.
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 1; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);

  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];

  // Upload the dat x-dimensions to __constant__ memory only when they differ
  // from the cached host-side copies.
  // FIX: wrap the symbols in HIP_SYMBOL() — passing the __constant__ variable
  // directly only compiles on the nvcc back-end (where hipMemcpyToSymbol maps
  // to the cudaMemcpyToSymbol template) and breaks on the AMD back-end, whose
  // hipMemcpyToSymbol takes a const void* symbol handle.
  if (xdim0 != xdim0_xder1_kernel_h || xdim1 != xdim1_xder1_kernel_h) {
    hipMemcpyToSymbol(HIP_SYMBOL(xdim0_xder1_kernel), &xdim0, sizeof(int));
    xdim0_xder1_kernel_h = xdim0;
    hipMemcpyToSymbol(HIP_SYMBOL(xdim1_xder1_kernel), &xdim1, sizeof(int));
    xdim1_xder1_kernel_h = xdim1;
  }

  dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
  dim3 tblock(OPS_block_size_x, 1, 1);

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;

  char *p_a[2];

  // set up initial pointers: offset each device buffer to the start of the
  // local iteration range, accounting for base offsets and (with MPI) the
  // intra-block displacement of this rank's sub-dat.
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  p_a[0] = (char *)args[0].data_d + base0;

#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  p_a[1] = (char *)args[1].data_d + base1;

  ops_H_D_exchanges_device(args, 2);
  ops_halo_exchanges(args, 2, range);

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[4].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL((ops_xder1_kernel), dim3(grid), dim3(tblock), 0, 0,
                     (double *)p_a[0], (double *)p_a[1], x_size);

  if (OPS_diags > 1) {
    // Synchronize only when diagnostics are on, so normal runs stay async.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[4].time += t1 - t2;
  }

  ops_set_dirtybit_device(args, 2);
  ops_set_halo_dirtybit3(&args[1], range);

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[4].mpi_time += t2 - t1;
    OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
| 168f176cc6f205daeb76bfd8cc30459d03164336.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_xder1_kernel;
int xdim0_xder1_kernel_h = -1;
int ydim0_xder1_kernel_h = -1;
__constant__ int xdim1_xder1_kernel;
int xdim1_xder1_kernel_h = -1;
int ydim1_xder1_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
// user function
// 4th-order central first derivative in x:
//   out = (f(-2) - f(+2) + 8 * (f(+1) - f(-1))) / (12 * dx)
// `dx` is a global defined elsewhere (presumably the grid spacing — TODO
// confirm against the OPS-generated constants).
__device__ void xder1_kernel_gpu(const double *inp, double *out) {
  const double inv_12dx = 1 / (12.00 * dx);
  const double stencil = inp[OPS_ACC0(-2)] - inp[OPS_ACC0(2)] +
                         8.0 * (inp[OPS_ACC0(1)] - inp[OPS_ACC0(-1)]);
  out[OPS_ACC1(0)] = stencil * inv_12dx;
}
#undef OPS_ACC0
#undef OPS_ACC1
// 1D wrapper kernel: each thread applies xder1_kernel_gpu to one grid point.
// Launched with a 1D grid sized to cover size0 points (guard handles the tail).
__global__ void ops_xder1_kernel(const double *__restrict arg0,
                                 double *__restrict arg1, int size0) {
  const int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx_x >= size0)
    return;
  // Unit stride in x for both dats (the generated stride factors are 1 * 1).
  xder1_kernel_gpu(arg0 + idx_x, arg1 + idx_x);
}
// host stub function
// Host-side wrapper generated by the OPS code generator for the 1D
// "xder1_kernel" parallel loop: computes the locally owned iteration range,
// refreshes per-dat leading dimensions in __constant__ memory, sets up base
// pointers into device data, performs halo exchanges, launches the CUDA
// kernel, and records timing/transfer diagnostics (kernel record index 4).
void ops_par_loop_xder1_kernel(char const *name, ops_block block, int dim,
                               int *range, ops_arg arg0, ops_arg arg1) {
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 2, range, 4))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(4, "xder1_kernel");
    OPS_kernels[4].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[1];
  int end[1];
#ifdef OPS_MPI
  // Intersect the requested global range with this rank's owned sub-block,
  // extending at physical domain edges (MPI_PROC_NULL neighbours).
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 1; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  // Non-MPI build: the local range is the requested range verbatim.
  for (int n = 0; n < 1; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  // Upload leading dimensions to __constant__ memory only when they changed
  // since the previous launch (cudaMemcpyToSymbol is not free).
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  if (xdim0 != xdim0_xder1_kernel_h || xdim1 != xdim1_xder1_kernel_h) {
    cudaMemcpyToSymbol(xdim0_xder1_kernel, &xdim0, sizeof(int));
    xdim0_xder1_kernel_h = xdim0;
    cudaMemcpyToSymbol(xdim1_xder1_kernel, &xdim1, sizeof(int));
    xdim1_xder1_kernel_h = xdim1;
  }
  // 1D launch configuration: ceil(x_size / OPS_block_size_x) blocks.
  dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
  dim3 tblock(OPS_block_size_x, 1, 1);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  char *p_a[2];
  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  // Byte offset of the first owned element within arg0's device buffer.
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  p_a[1] = (char *)args[1].data_d + base1;
  ops_H_D_exchanges_device(args, 2);
  ops_halo_exchanges(args, 2, range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[4].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data
  ops_xder1_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
                                     x_size);
  if (OPS_diags > 1) {
    // Synchronize only when diagnostics are enabled so timings are accurate.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[4].time += t1 - t2;
  }
  ops_set_dirtybit_device(args, 2);
  ops_set_halo_dirtybit3(&args[1], range);
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[4].mpi_time += t2 - t1;
    OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
|
ed5f7e2e36634218f6a846dd229e008d11dc60b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************************
* Numerical Solution for the Cubic Nonlinear Schrodinger Equation *
* using second order split step Fourier method. *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. *
**********************************************************************************/
#include "../lib/cu_helpers.h"
#include <hipfft.h>
// Grid Parameters
#define XN nodes // Number of Fourier modes
#define TN 100 // Number of temporal nodes
#define LX 10.0 // x-spatial domain [-LX,LX)
#define TT 10.0 // Max time
#define DX (2*LX / XN) // x-spatial step size
#define DT (TT / TN) // temporal step size
// Timing parameters
#define IRVL 100 // Timing interval. Take a reading every N iterations.
// Output files
#define PLOT_F "gpu_fft_plot.m"
#define TIME_F argv[2]
// Function prototypes
__global__ void nonlin(hipfftDoubleComplex *psi, double dt, int xn);
__global__ void lin(hipfftDoubleComplex *psi, double *k2, double dt, int xn);
__global__ void normalize(hipfftDoubleComplex *psi, int size);
int main(int argc, char *argv[])
{
// Timing info
hipEvent_t begin_event, end_event;
hipEventCreate(&begin_event);
hipEventCreate(&end_event);
// Print basic info about simulation
const int nodes = atoi(argv[1]);
printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));
// Allocate host arrays
double *h_x = (double*)malloc(sizeof(double) * XN);
double *h_k2 = (double*)malloc(sizeof(double) * XN);
double *h_kx = (double*)malloc(XN * sizeof(double));
hipfftDoubleComplex *h_psi = (hipfftDoubleComplex*)
malloc(sizeof(hipfftDoubleComplex)*XN);
hipfftDoubleComplex *h_psi_0 = (hipfftDoubleComplex*)
malloc(sizeof(hipfftDoubleComplex)*XN);
// Create transform plans
hipfftHandle plan;
CUFFT_SAFE_CALL(hipfftPlan1d(&plan, XN, HIPFFT_Z2Z, 1));
// Create wave number
double dkx = 2*M_PI/XN/DX;
for(int i = XN/2; i >= 0; i--)
h_kx[XN/2 - i]=(XN/2 - i) * dkx;
for(int i = XN/2+1; i < XN; i++)
h_kx[i]=(i - XN) * dkx;
// Initial conditions on host
for(int i = 0; i < XN; i++)
{
h_x[i] = (i-XN/2)*DX;
h_psi[i].x = sqrt(2)/cosh(h_x[i]);
//h_psi[i].x = 2*exp(-(x[i]*x[i]/2.0/2.0));
h_psi[i].y = 0;
h_psi_0[i].x = h_psi[i].x;
h_psi_0[i].y = h_psi[i].y;
h_k2[i] = h_kx[i]*h_kx[i];
}
// Allocate device arrays and copy from host
hipfftDoubleComplex *d_psi; double *d_k2;
CUDAR_SAFE_CALL(hipMalloc(&d_psi, sizeof(hipfftDoubleComplex)*XN));
CUDAR_SAFE_CALL(hipMalloc(&d_k2, sizeof(double)*XN));
CUDAR_SAFE_CALL(hipMemcpy(d_psi, h_psi, sizeof(hipfftDoubleComplex)*XN, hipMemcpyHostToDevice));
CUDAR_SAFE_CALL(hipMemcpy(d_k2, h_k2, sizeof(double)*XN, hipMemcpyHostToDevice));
// Initialize the grid
dim3 threadsPerBlock(128,1,1);
dim3 blocksPerGrid((XN + 127)/128,1,1);
// Forward transform
CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_FORWARD));
// Timing starts here
hipEventRecord(begin_event, 0);
// Start time evolution
for (int i = 1; i <= TN; i++)
{
// Solve linear part
hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(hipPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Backward transform
CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_BACKWARD));
// Normalize the transform
hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(hipPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Solve nonlinear part
hipLaunchKernelGGL(( nonlin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, DT, XN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(hipPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Forward transform
CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_FORWARD));
// Solve linear part
hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(hipPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
}
float time_value;
hipEventRecord(end_event, 0);
hipEventSynchronize(end_event);
hipEventElapsedTime(&time_value, begin_event, end_event);
// Print time to file
FILE *fp = fopen(TIME_F, "a");
fprintf(fp, "%f, ", time_value);
fclose(fp);
// Backward tranform to retreive data
CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_BACKWARD));
// Normalize the transform
hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(hipPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Copy results to device
CUDAR_SAFE_CALL(hipMemcpy(h_psi, d_psi, sizeof(hipfftDoubleComplex)*XN,
hipMemcpyDeviceToHost));
// Plot results
cm_plot_1d(h_psi_0, h_psi, LX, XN, PLOT_F);
// Clean up
CUFFT_SAFE_CALL(hipfftDestroy(plan));
free(h_x);
free(h_k2);
free(h_kx);
free(h_psi_0);
free(h_psi);
CUDAR_SAFE_CALL(hipFree(d_psi));
CUDAR_SAFE_CALL(hipFree(d_k2));
return 0;
}
// Nonlinear half-step of the split-step scheme: rotate each sample's phase
// by |psi|^2 * dt (cubic nonlinearity); the magnitude is unchanged.
// NOTE(review): boundary points are treated like interior points — the
// original author flagged this as needing a fix.
__global__ void nonlin(hipfftDoubleComplex *psi, double dt, int xn)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= xn)
		return;
	const double intensity = cuCabs(psi[idx]) * cuCabs(psi[idx]);
	const hipfftDoubleComplex rot =
		make_cuDoubleComplex(cos(intensity * dt), sin(intensity * dt));
	psi[idx] = cuCmul(psi[idx], rot);
}
// Linear half-step, applied in Fourier space: multiply each mode by
// exp(-i * k^2 * dt) (dispersion). k2 holds the squared wave numbers.
// NOTE(review): boundary points are treated like interior points — the
// original author flagged this as needing a fix.
__global__ void lin(hipfftDoubleComplex *psi, double *k2, double dt, int xn)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= xn)
		return;
	const double angle = k2[idx] * dt;
	psi[idx] = cuCmul(psi[idx], make_cuDoubleComplex(cos(angle), -sin(angle)));
}
// Undo FFT scaling: hipFFT transforms are unnormalized, so divide every
// element by the transform length after a backward pass.
__global__ void normalize(hipfftDoubleComplex *psi, int size)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Guard the grid tail: the launch may cover more threads than elements.
	if (idx < size)
	{
		psi[idx].x /= size;
		psi[idx].y /= size;
	}
}
| ed5f7e2e36634218f6a846dd229e008d11dc60b0.cu | /**********************************************************************************
* Numerical Solution for the Cubic Nonlinear Schrodinger Equation *
* using second order split step Fourier method. *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. *
**********************************************************************************/
#include "../lib/cu_helpers.h"
#include <cufft.h>
// Grid Parameters
#define XN nodes // Number of Fourier modes
#define TN 100 // Number of temporal nodes
#define LX 10.0 // x-spatial domain [-LX,LX)
#define TT 10.0 // Max time
#define DX (2*LX / XN) // x-spatial step size
#define DT (TT / TN) // temporal step size
// Timing parameters
#define IRVL 100 // Timing interval. Take a reading every N iterations.
// Output files
#define PLOT_F "gpu_fft_plot.m"
#define TIME_F argv[2]
// Function prototypes
__global__ void nonlin(cufftDoubleComplex *psi, double dt, int xn);
__global__ void lin(cufftDoubleComplex *psi, double *k2, double dt, int xn);
__global__ void normalize(cufftDoubleComplex *psi, int size);
// Driver for the split-step Fourier NLS solver.
// Usage: <prog> <nodes> <timing_file>
//   argv[1] = number of Fourier modes (XN), argv[2] = file timings append to.
int main(int argc, char *argv[])
{
	// BUG FIX: argv[1]/argv[2] were read without checking argc.
	if (argc < 3)
	{
		fprintf(stderr, "Usage: %s <nodes> <timing_file>\n", argv[0]);
		return 1;
	}
	// Timing info
	cudaEvent_t begin_event, end_event;
	cudaEventCreate(&begin_event);
	cudaEventCreate(&end_event);
	// Print basic info about simulation
	const int nodes = atoi(argv[1]);
	printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));
	// Allocate host arrays
	double *h_x = (double*)malloc(sizeof(double) * XN);
	double *h_k2 = (double*)malloc(sizeof(double) * XN);
	double *h_kx = (double*)malloc(XN * sizeof(double));
	cufftDoubleComplex *h_psi = (cufftDoubleComplex*)
							malloc(sizeof(cufftDoubleComplex)*XN);
	cufftDoubleComplex *h_psi_0 = (cufftDoubleComplex*)
							malloc(sizeof(cufftDoubleComplex)*XN);
	// Create transform plans
	cufftHandle plan;
	CUFFT_SAFE_CALL(cufftPlan1d(&plan, XN, CUFFT_Z2Z, 1));
	// Create wave number (FFT ordering: non-negative modes, then negative)
	double dkx = 2*M_PI/XN/DX;
	for(int i = XN/2; i >= 0; i--)
		h_kx[XN/2 - i]=(XN/2 - i) * dkx;
	for(int i = XN/2+1; i < XN; i++)
		h_kx[i]=(i - XN) * dkx;
	// Initial conditions on host (sech profile) and squared wave numbers
	for(int i = 0; i < XN; i++)
	{
		h_x[i] = (i-XN/2)*DX;
		h_psi[i].x = sqrt(2)/cosh(h_x[i]);
		//h_psi[i].x = 2*exp(-(x[i]*x[i]/2.0/2.0));
		h_psi[i].y = 0;
		h_psi_0[i].x = h_psi[i].x;
		h_psi_0[i].y = h_psi[i].y;
		h_k2[i] = h_kx[i]*h_kx[i];
	}
	// Allocate device arrays and copy from host
	cufftDoubleComplex *d_psi; double *d_k2;
	CUDAR_SAFE_CALL(cudaMalloc(&d_psi, sizeof(cufftDoubleComplex)*XN));
	CUDAR_SAFE_CALL(cudaMalloc(&d_k2, sizeof(double)*XN));
	CUDAR_SAFE_CALL(cudaMemcpy(d_psi, h_psi, sizeof(cufftDoubleComplex)*XN, cudaMemcpyHostToDevice));
	CUDAR_SAFE_CALL(cudaMemcpy(d_k2, h_k2, sizeof(double)*XN, cudaMemcpyHostToDevice));
	// Initialize the grid (128 threads per block, ceil-div for blocks)
	dim3 threadsPerBlock(128,1,1);
	dim3 blocksPerGrid((XN + 127)/128,1,1);
	// Forward transform
	CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_FORWARD));
	// Timing starts here
	cudaEventRecord(begin_event, 0);
	// Time evolution: Strang splitting — half linear, full nonlinear,
	// half linear per step, with FFTs between the two spaces.
	for (int i = 1; i <= TN; i++)
	{
		// Solve linear part
		lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(cudaPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
		// Backward transform
		CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_INVERSE));
		// Normalize the transform (cuFFT transforms are unnormalized)
		normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(cudaPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
		// Solve nonlinear part
		nonlin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, DT, XN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(cudaPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
		// Forward transform
		CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_FORWARD));
		// Solve linear part
		lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(cudaPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
	}
	float time_value;
	cudaEventRecord(end_event, 0);
	cudaEventSynchronize(end_event);
	cudaEventElapsedTime(&time_value, begin_event, end_event);
	// Print time to file.
	// BUG FIX: fopen's result was used without a NULL check.
	FILE *fp = fopen(TIME_F, "a");
	if (fp != NULL)
	{
		fprintf(fp, "%f, ", time_value);
		fclose(fp);
	}
	else
	{
		fprintf(stderr, "Failed to open timing file for append.\n");
	}
	// Backward tranform to retreive data
	CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_INVERSE));
	// Normalize the transform
	normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN);
	#if CUDAR_ERROR_CHECKING
	CUDAR_SAFE_CALL(cudaPeekAtLastError());
	#endif // CUDAR_ERROR_CHECKING
	// Copy results back to host (fixed comment: this is device-to-host)
	CUDAR_SAFE_CALL(cudaMemcpy(h_psi, d_psi, sizeof(cufftDoubleComplex)*XN,
															cudaMemcpyDeviceToHost));
	// Plot results
	cm_plot_1d(h_psi_0, h_psi, LX, XN, PLOT_F);
	// Clean up
	CUFFT_SAFE_CALL(cufftDestroy(plan));
	// BUG FIX: the timing events were never destroyed (resource leak).
	cudaEventDestroy(begin_event);
	cudaEventDestroy(end_event);
	free(h_x);
	free(h_k2);
	free(h_kx);
	free(h_psi_0);
	free(h_psi);
	CUDAR_SAFE_CALL(cudaFree(d_psi));
	CUDAR_SAFE_CALL(cudaFree(d_k2));
	return 0;
}
// Nonlinear half-step of the split-step scheme: rotate each sample's phase
// by |psi|^2 * dt (cubic nonlinearity); the magnitude is unchanged.
// NOTE(review): boundary points are treated like interior points — the
// original author flagged this as needing a fix.
__global__ void nonlin(cufftDoubleComplex *psi, double dt, int xn)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= xn)
		return;
	const double intensity = cuCabs(psi[idx]) * cuCabs(psi[idx]);
	const cufftDoubleComplex rot =
		make_cuDoubleComplex(cos(intensity * dt), sin(intensity * dt));
	psi[idx] = cuCmul(psi[idx], rot);
}
// Linear half-step, applied in Fourier space: multiply each mode by
// exp(-i * k^2 * dt) (dispersion). k2 holds the squared wave numbers.
// NOTE(review): boundary points are treated like interior points — the
// original author flagged this as needing a fix.
__global__ void lin(cufftDoubleComplex *psi, double *k2, double dt, int xn)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= xn)
		return;
	const double angle = k2[idx] * dt;
	psi[idx] = cuCmul(psi[idx], make_cuDoubleComplex(cos(angle), -sin(angle)));
}
// Undo FFT scaling: cuFFT transforms are unnormalized, so divide every
// element by the transform length after an inverse pass.
__global__ void normalize(cufftDoubleComplex *psi, int size)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Guard the grid tail: the launch may cover more threads than elements.
	if (idx < size)
	{
		psi[idx].x /= size;
		psi[idx].y /= size;
	}
}
|
d38a6ec33e36ef66e4d77a4b893110923d9d82b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchMatch.cuh"
#include "DeepAnalogyMulti.cuh"
#include "WLS.h"
#include "Deconv.h"
#include <sys/types.h>
#include <sys/stat.h>
// Configuration bundle for the deep-analogy PatchMatch pipeline.
struct Parameters
{
	std::vector<std::string> layers; // which VGG layers are used as content (coarse to fine)
	int patch_size0;                 // NOTE(review): appears unused in the visible code — confirm
	int iter;                        // number of PatchMatch iterations per layer
};
// L2-normalizes each spatial position's feature vector of a channel-major
// (channel x height x width) device tensor: dst[c, p] = src[c, p] / ||src[:, p]||.
// If 'smooth' is non-NULL it additionally receives the per-pixel squared
// magnitude, min-max rescaled to [0, 1] (used later as a response map).
// All pointers are device memory; dim describes the tensor layout.
__host__ void norm2(float* &dst, float* src, float* smooth, Dim dim){
	int count = dim.channel*dim.height*dim.width;
	float* x = src;
	float* x2;
	hipMalloc(&x2, count*sizeof(float));
	// x2 = src elementwise squared
	caffe_gpu_mul(count, x, x, x2);
	// calculate dis: per-pixel L2 norm of the channel vector
	float*sum;
	float* ones;
	hipMalloc(&sum, dim.height*dim.width*sizeof(float));
	hipMalloc(&ones, dim.channel*sizeof(float));
	caffe_gpu_set(dim.channel, 1.0f, ones);
	// sum[p] = sum over channels of x2[c, p] (reduce via ones-vector GEMV)
	caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
	float *dis;
	hipMalloc(&dis, dim.height*dim.width*sizeof(float));
	// dis = sqrt(sum): the L2 norm per pixel
	caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
	if (smooth != NULL)
	{
		// smooth starts as the squared magnitudes, then min-max normalize.
		hipMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), hipMemcpyDeviceToDevice);
		int index;
		float minv, maxv;
		// BLAS I*amin/I*amax return 1-based indices, hence "index - 1".
		hipblasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
		hipMemcpy(&minv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
		hipblasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
		hipMemcpy(&maxv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
		// NOTE(review): divides by (maxv - minv) without guarding maxv == minv.
		caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
		caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
	}
	// norm2: broadcast dis across channels (ones * dis outer product reuses
	// x2 as scratch), then divide src elementwise.
	caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
	caffe_gpu_div(count, src, x2, dst);
	hipFree(x2);
	hipFree(ones);
	hipFree(dis);
	hipFree(sum);
}
// Default configuration: full resolution, strongest blend-weight preset,
// photo transfer off, and every path/list empty until set explicitly.
DeepAnalogyMulti::DeepAnalogyMulti(){
	resizeRatio = 1;
	weightLevel = 3;
	photoTransfer = false;
	list_A = list_BP = "";
	path_output = path_model = "";
	path_result_AB = path_result_BA = "";
	path_refine_AB = path_refine_BA = "";
}
// No dynamically owned resources here; members release themselves via
// their own destructors.
DeepAnalogyMulti::~DeepAnalogyMulti(){
}
// Scale factor applied to both inputs before feature extraction.
void DeepAnalogyMulti::SetRatio(float ratio){
	resizeRatio = ratio;
}
// Blend-weight preset selector (1-3); maps to the weight tables in ComputeAnn.
void DeepAnalogyMulti::SetBlendWeight(int level){
	weightLevel = level;
}
// Enables/disables the photorealistic transfer post-process.
void DeepAnalogyMulti::UsePhotoTransfer(bool flag){
	photoTransfer = flag;
}
// Directory containing the VGG-19 prototxt/caffemodel files.
void DeepAnalogyMulti::SetModel(string path){
	path_model =path;
}
// Path to the text file listing content (A) image paths, one per line.
void DeepAnalogyMulti::SetA(string list_a){
	list_A = list_a;
}
// Path to the text file listing style (B') image paths, one per line.
void DeepAnalogyMulti::SetBPrime(string list_bp){
	list_BP = list_bp;
}
// Records the output directory, creating it (mode 0777) when it does not
// already exist.
void DeepAnalogyMulti::SetOutputDir(string dir_o){
	path_output = dir_o;
	if (access(path_output.c_str(), 0) != -1) {
		return; // directory already exists
	}
	cout << path_output << "is not existing, now make it." << endl;
	if (mkdir(path_output.c_str(), 0777) == 0) {
		cout << path_output << "has made successfully" << endl;
	} else {
		cout << path_output << "has made errorly" << endl;
	}
}
// Selects the GPU device that subsequent HIP calls on this thread will use.
void DeepAnalogyMulti::SetGPU(int no){
	hipSetDevice(no);
}
void DeepAnalogyMulti::ReadFilePath(){
// for content images files list
ifstream list_fileA;
list_fileA.open(list_A,ios::in);
if (!list_fileA.is_open())
{
cout<<"Read list file "<<list_A<<"failed!"<<endl;
return;
}
std::string file_A;
while (getline(list_fileA, file_A))
{
cout<<"Reading file: "<<file_A<<endl;
file_A_set.push_back(file_A);
int pos = file_A.find_last_of('/');
string name(file_A.substr(pos + 1) );
name_A_set.push_back(name);
}
// for style images files list
ifstream list_fileB;
list_fileB.open(list_BP,ios::in);
if (!list_fileB.is_open())
{
cout<<"Read list file "<<list_BP<<"failed!"<<endl;
return;
}
std::string file_B;
while (getline(list_fileB, file_B))
{
cout<<"Reading file: "<<file_B<<endl;
file_BP_set.push_back(file_B);
int pos = file_B.find_last_of('/');
string name(file_B.substr(pos + 1) );
name_BP_set.push_back(name);
}
}
// Clamps the content (ori_M[0]) and style (ori_M[1]) images so each side is
// within [200, 700] pixels and the area is at most 350000, resizing with
// bicubic interpolation. On success pushes the resized images into img_M and
// writes the clamped originals back into ori_M; R receives the last scale
// factor applied. On rejection it prints a message and returns with img_M
// left unfilled (callers must check img_M before indexing it).
void DeepAnalogyMulti::CheckImageSize(std::vector<cv::Mat> &ori_M,std::vector<cv::Mat> &img_M,float &R){
	cv::Mat img_AL,img_BPL;
	cv::Mat ori_AL = ori_M[0];
	cv::Mat ori_BPL = ori_M[1];
	// BUG FIX: 'ratio' was declared int, which truncated fractional scale
	// factors to 0 (invalid fx/fy for cv::resize) or 1 (silent no-op upscale).
	float ratio = R;
	if (ori_AL.rows > 700)
	{
		ratio = 700.f / ori_AL.rows;
		cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
		ori_AL = img_AL.clone();
	}
	if (ori_AL.cols > 700)
	{
		ratio = 700.f / ori_AL.cols;
		cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
		ori_AL = img_AL.clone();
	}
	if (ori_AL.rows < 200)
	{
		ratio = 200.f / ori_AL.rows;
		cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
		ori_AL = img_AL.clone();
	}
	if (ori_AL.cols < 200)
	{
		ratio = 200.f / ori_AL.cols;
		cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
		ori_AL = img_AL.clone();
	}
	if (ori_BPL.rows > 700)
	{
		ratio = 700.f / ori_BPL.rows;
		cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
		ori_BPL = img_BPL.clone();
	}
	if (ori_BPL.cols > 700)
	{
		ratio = 700.f / ori_BPL.cols;
		cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
		ori_BPL = img_BPL.clone();
	}
	if (ori_BPL.rows < 200)
	{
		ratio = 200.f / ori_BPL.rows;
		cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
		ori_BPL = img_BPL.clone();
	}
	if (ori_BPL.cols < 200)
	{
		ratio = 200.f / ori_BPL.cols;
		cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
		ori_BPL = img_BPL.clone();
	}
	// Area cap: shrink isotropically so cols*rows <= 350000.
	if ((ori_AL.cols*ori_AL.rows) > 350000)
	{
		ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
		cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
		ori_AL = img_AL.clone();
	}
	if ((ori_BPL.cols*ori_BPL.rows) > 350000)
	{
		ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
		cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
		ori_BPL = img_BPL.clone();
	}
	// Final validation after clamping.
	int maxLateral, minLateral;
	maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
	minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
	if (maxLateral > 700 || minLateral < 200)
	{
		cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
		waitKey();
		return;
	}
	img_M.push_back(img_AL);
	img_M.push_back(img_BPL);
	ori_M[0] = ori_AL;
	ori_M[1] = ori_BPL;
	R = ratio;
}
void DeepAnalogyMulti::LoadInputs(){
// read all input images
ReadFilePath();
if (file_A_set.size()!=file_BP_set.size())
return;
float ratio;
for (int i=0;i<file_A_set.size();i++){
Mat ori_AL = imread(file_A_set[i]);
Mat ori_BPL = imread(file_BP_set[i]);
if (ori_AL.empty() || ori_BPL.empty())
{
cout << "image cannot read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
std::vector<cv::Mat> ori_M, img_M;
ori_M.push_back(ori_AL);
ori_M.push_back(ori_BPL);
// check the size and area of input images
cv::Mat img_AL,img_BPL;
CheckImageSize(ori_M, img_M, ratio);
img_AL = img_M[0]; img_BPL = img_M[1];
ori_AL = ori_M[0]; ori_BPL = ori_M[1];
img_M.clear(); ori_M.clear();
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
// put results into vector
img_AL_set.push_back(img_AL);
img_BPL_set.push_back(img_BPL);
}
}
void DeepAnalogyMulti::ComputeAnn() {
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
//load caffe model
::google::InitGoogleLogging("DeepAnalogyMulti");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
// for each image pair
for (int i=0;i<img_AL_set.size();i++){
//scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
Mat img_AL = img_AL_set[i];
Mat img_BPL = img_BPL_set[i];
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
std::vector<float *> data_A, data_A1;
data_A.resize(params.layers.size());
data_A1.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A1, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
hipMalloc(¶ms_device_AB, param_size * sizeof(int));
hipMalloc(¶ms_device_BA, param_size * sizeof(int));
hipMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
hipMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
hipMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
hipMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
//feature match
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsampling, notice this block's dimension is twice the ann at this point
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
}
//norm2arlize two data
float *Ndata_A, *Ndata_A1, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
hipMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_A1, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm2(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm2(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
hipMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_A1[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm2(Ndata_A1, data_A1[curr_layer], NULL, data_A_size[curr_layer]);
norm2(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_A1, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_A1, ann_device_BA, annd_device_BA, params_device_BA);
hipFree(Ndata_A);
hipFree(Ndata_A1);
hipFree(Ndata_B);
hipFree(Ndata_BP);
hipFree(response_A);
hipFree(response_BP);
//deconv
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_A1[next_layer], data_A_size[next_layer]);
hipFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
hipFree(target);
}
}
//upsample
int curr_layer = numlayer - 1;
{
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
//updample
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
//image downBAale
Mat flow, result_AB, result_BA, err, out, norm2al;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_result_AB + "result_" + name_A_set[i], out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_result_BA + "result_" + name_BP_set[i], out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
refine_AB.convertTo(norm2al, CV_32FC3, 255.0);
cv::resize(norm2al, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
//imwrite(path_output + fname, out);
imwrite(path_refine_AB + "refine_" + name_A_set[i], out);
refine_BA.convertTo(norm2al, CV_32FC3, 255.0);
cv::resize(norm2al, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
//imwrite(path_output + fname, out);
imwrite(path_refine_BA + "refine_" + name_BP_set[i], out);
}
}
/*
cout << "Saving flow result." << "\n";
//save ann
{
ofstream output1;
char fname[256];
sprintf(fname, "flowAB.txt");
output1.open(path_output + fname);
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA.txt");
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
*/
hipFree(params_device_AB);
hipFree(ann_device_AB);
hipFree(annd_device_AB);
hipFree(params_device_BA);
hipFree(ann_device_BA);
hipFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
hipFree(data_A[i]);
hipFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
}
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
void DeepAnalogyMulti::MakeOutputDir(){
// prepare result directory
path_result_AB = path_output + "result_AB/";
path_result_BA = path_output + "result_BA/";
path_refine_AB = path_output + "refine_AB/";
path_refine_BA = path_output + "refine_BA/";
int flag1, flag2, flag3, flag4;
flag1 = 0; flag2 = 0; flag3 = 0; flag4 = 0;
if (access(path_result_AB.c_str(),0)==-1){
flag1 = mkdir(path_result_AB.c_str(),0777);
}
if (access(path_result_BA.c_str(),0)==-1){
flag2 = mkdir(path_result_BA.c_str(),0777);
}
if (photoTransfer)
{
if (access(path_refine_AB.c_str(),0)==-1){
flag3 = mkdir(path_refine_AB.c_str(),0777);
}
if (access(path_refine_BA.c_str(),0)==-1){
flag4 = mkdir(path_refine_BA.c_str(),0777);
}
}
if (flag1==0 && flag2==0 && flag3==0 && flag4==0){
cout<<"Result directories are prepared successfully"<<endl;
}else{
cout<<"Result directories are prepared errorly"<<endl;
}
}
| d38a6ec33e36ef66e4d77a4b893110923d9d82b3.cu | #include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchMatch.cuh"
#include "DeepAnalogyMulti.cuh"
#include "WLS.h"
#include "Deconv.h"
#include <sys/types.h>
#include <sys/stat.h>
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
int patch_size0;
int iter;
};
__host__ void norm2(float* &dst, float* src, float* smooth, Dim dim){
int count = dim.channel*dim.height*dim.width;
float* x = src;
float* x2;
cudaMalloc(&x2, count*sizeof(float));
caffe_gpu_mul(count, x, x, x2);
//caculate dis
float*sum;
float* ones;
cudaMalloc(&sum, dim.height*dim.width*sizeof(float));
cudaMalloc(&ones, dim.channel*sizeof(float));
caffe_gpu_set(dim.channel, 1.0f, ones);
caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
float *dis;
cudaMalloc(&dis, dim.height*dim.width*sizeof(float));
caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
if (smooth != NULL)
{
cudaMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), cudaMemcpyDeviceToDevice);
int index;
float minv, maxv;
cublasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&minv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
cublasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&maxv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
}
//norm2
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
caffe_gpu_div(count, src, x2, dst);
cudaFree(x2);
cudaFree(ones);
cudaFree(dis);
cudaFree(sum);
}
DeepAnalogyMulti::DeepAnalogyMulti(){
resizeRatio = 1;
weightLevel = 3;
photoTransfer = false;
list_A = "";
list_BP = "";
path_output = "";
path_model = "";
path_result_AB = "";
path_result_BA = "";
path_refine_AB = "";
path_refine_BA = "";
}
DeepAnalogyMulti::~DeepAnalogyMulti(){
}
void DeepAnalogyMulti::SetRatio(float ratio){
resizeRatio = ratio;
}
void DeepAnalogyMulti::SetBlendWeight(int level){
weightLevel = level;
}
void DeepAnalogyMulti::UsePhotoTransfer(bool flag){
photoTransfer = flag;
}
void DeepAnalogyMulti::SetModel(string path){
path_model =path;
}
void DeepAnalogyMulti::SetA(string list_a){
list_A = list_a;
}
void DeepAnalogyMulti::SetBPrime(string list_bp){
list_BP = list_bp;
}
void DeepAnalogyMulti::SetOutputDir(string dir_o){
path_output = dir_o;
if (access(path_output.c_str(),0)==-1){
cout<<path_output<<"is not existing, now make it."<<endl;
int flag = mkdir(path_output.c_str(),0777);
if (flag==0){
cout<<path_output<<"has made successfully"<<endl;
}else{
cout<<path_output<<"has made errorly"<<endl;
}
}
}
void DeepAnalogyMulti::SetGPU(int no){
cudaSetDevice(no);
}
void DeepAnalogyMulti::ReadFilePath(){
// for content images files list
ifstream list_fileA;
list_fileA.open(list_A,ios::in);
if (!list_fileA.is_open())
{
cout<<"Read list file "<<list_A<<"failed!"<<endl;
return;
}
std::string file_A;
while (getline(list_fileA, file_A))
{
cout<<"Reading file: "<<file_A<<endl;
file_A_set.push_back(file_A);
int pos = file_A.find_last_of('/');
string name(file_A.substr(pos + 1) );
name_A_set.push_back(name);
}
// for style images files list
ifstream list_fileB;
list_fileB.open(list_BP,ios::in);
if (!list_fileB.is_open())
{
cout<<"Read list file "<<list_BP<<"failed!"<<endl;
return;
}
std::string file_B;
while (getline(list_fileB, file_B))
{
cout<<"Reading file: "<<file_B<<endl;
file_BP_set.push_back(file_B);
int pos = file_B.find_last_of('/');
string name(file_B.substr(pos + 1) );
name_BP_set.push_back(name);
}
}
void DeepAnalogyMulti::CheckImageSize(std::vector<cv::Mat> &ori_M,std::vector<cv::Mat> &img_M,float &R){
cv::Mat img_AL,img_BPL;
cv::Mat ori_AL = ori_M[0];
cv::Mat ori_BPL = ori_M[1];
int ratio = R;
if (ori_AL.rows > 700)
{
ratio = 700.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols > 700)
{
ratio = 700.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.rows < 200)
{
ratio = 200.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols < 200)
{
ratio = 200.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_BPL.rows > 700)
{
ratio = 700.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols > 700)
{
ratio = 700.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.rows < 200)
{
ratio = 200.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols < 200)
{
ratio = 200.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if ((ori_AL.cols*ori_AL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if ((ori_BPL.cols*ori_BPL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
int maxLateral, minLateral;
maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
if (maxLateral > 700 || minLateral < 200)
{
cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
waitKey();
return;
}
img_M.push_back(img_AL);
img_M.push_back(img_BPL);
ori_M[0] = ori_AL;
ori_M[1] = ori_BPL;
R = ratio;
}
void DeepAnalogyMulti::LoadInputs(){
// read all input images
ReadFilePath();
if (file_A_set.size()!=file_BP_set.size())
return;
float ratio;
for (int i=0;i<file_A_set.size();i++){
Mat ori_AL = imread(file_A_set[i]);
Mat ori_BPL = imread(file_BP_set[i]);
if (ori_AL.empty() || ori_BPL.empty())
{
cout << "image cannot read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
std::vector<cv::Mat> ori_M, img_M;
ori_M.push_back(ori_AL);
ori_M.push_back(ori_BPL);
// check the size and area of input images
cv::Mat img_AL,img_BPL;
CheckImageSize(ori_M, img_M, ratio);
img_AL = img_M[0]; img_BPL = img_M[1];
ori_AL = ori_M[0]; ori_BPL = ori_M[1];
img_M.clear(); ori_M.clear();
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
// put results into vector
img_AL_set.push_back(img_AL);
img_BPL_set.push_back(img_BPL);
}
}
void DeepAnalogyMulti::ComputeAnn() {
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
//load caffe model
::google::InitGoogleLogging("DeepAnalogyMulti");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
// for each image pair
for (int i=0;i<img_AL_set.size();i++){
//scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
Mat img_AL = img_AL_set[i];
Mat img_BPL = img_BPL_set[i];
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
std::vector<float *> data_A, data_A1;
data_A.resize(params.layers.size());
data_A1.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A1, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
cudaMalloc(¶ms_device_AB, param_size * sizeof(int));
cudaMalloc(¶ms_device_BA, param_size * sizeof(int));
cudaMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
cudaMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
cudaMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
cudaMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
//feature match
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsampling, notice this block's dimension is twice the ann at this point
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
}
//norm2arlize two data
float *Ndata_A, *Ndata_A1, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
cudaMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_A1, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm2(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm2(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
cudaMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_A1[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm2(Ndata_A1, data_A1[curr_layer], NULL, data_A_size[curr_layer]);
norm2(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_A1, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_A1, ann_device_BA, annd_device_BA, params_device_BA);
cudaFree(Ndata_A);
cudaFree(Ndata_A1);
cudaFree(Ndata_B);
cudaFree(Ndata_BP);
cudaFree(response_A);
cudaFree(response_BP);
//deconv
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_A1[next_layer], data_A_size[next_layer]);
cudaFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
cudaFree(target);
}
}
//upsample
int curr_layer = numlayer - 1;
{
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
//updample
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
//image downBAale
Mat flow, result_AB, result_BA, err, out, norm2al;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_result_AB + "result_" + name_A_set[i], out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_result_BA + "result_" + name_BP_set[i], out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
refine_AB.convertTo(norm2al, CV_32FC3, 255.0);
cv::resize(norm2al, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
//imwrite(path_output + fname, out);
imwrite(path_refine_AB + "refine_" + name_A_set[i], out);
refine_BA.convertTo(norm2al, CV_32FC3, 255.0);
cv::resize(norm2al, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
//imwrite(path_output + fname, out);
imwrite(path_refine_BA + "refine_" + name_BP_set[i], out);
}
}
/*
cout << "Saving flow result." << "\n";
//save ann
{
ofstream output1;
char fname[256];
sprintf(fname, "flowAB.txt");
output1.open(path_output + fname);
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA.txt");
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
*/
cudaFree(params_device_AB);
cudaFree(ann_device_AB);
cudaFree(annd_device_AB);
cudaFree(params_device_BA);
cudaFree(ann_device_BA);
cudaFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
cudaFree(data_A[i]);
cudaFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
}
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
void DeepAnalogyMulti::MakeOutputDir(){
// prepare result directory
path_result_AB = path_output + "result_AB/";
path_result_BA = path_output + "result_BA/";
path_refine_AB = path_output + "refine_AB/";
path_refine_BA = path_output + "refine_BA/";
int flag1, flag2, flag3, flag4;
flag1 = 0; flag2 = 0; flag3 = 0; flag4 = 0;
if (access(path_result_AB.c_str(),0)==-1){
flag1 = mkdir(path_result_AB.c_str(),0777);
}
if (access(path_result_BA.c_str(),0)==-1){
flag2 = mkdir(path_result_BA.c_str(),0777);
}
if (photoTransfer)
{
if (access(path_refine_AB.c_str(),0)==-1){
flag3 = mkdir(path_refine_AB.c_str(),0777);
}
if (access(path_refine_BA.c_str(),0)==-1){
flag4 = mkdir(path_refine_BA.c_str(),0777);
}
}
if (flag1==0 && flag2==0 && flag3==0 && flag4==0){
cout<<"Result directories are prepared successfully"<<endl;
}else{
cout<<"Result directories are prepared errorly"<<endl;
}
}
|
99d9023fcfa4061482a279764aa9abd76586f213.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "kernel.hip"
#include "support.cu"
int main (int argc, char *argv[])
{
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
char *A_h, *B_h;
char *A_d, *B_d;
int *tab_h, *tab_d, *table;
int *top_d, *left_d;
int *top_h, *left_h;
size_t A_sz, B_sz, diag_loop;
int loop, i,j, k, l, m;
int A_block_dim, B_block_dim;
int A_grid, B_grid;
unsigned VecSize;
//dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 5120;
A_sz = VecSize;
B_sz = VecSize;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
if (VecSize > 40960) {
VecSize = 40960;
}
A_sz = VecSize;
B_sz = VecSize;
} else if (argc == 3) {
A_sz = atoi(argv[1]);
B_sz = atoi(argv[2]);
} else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
printf("Dimention of:\nA %d\nB %d\n", A_sz, B_sz);
//setting up input__________ start
//set standard seed
srand(217);
//A_sz + 1 for null pointer
A_h = (char*) malloc( sizeof(char)*(A_sz+1));
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%26)+'a';}
//B_sz + 1 for null pointer
B_h = (char*) malloc( sizeof(char)*(B_sz+1) );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%26)+'a';}
//A_h[0] = B_h[0] = 'A';//adding a char out of range as null
A_h[A_sz] = B_h[B_sz] = '\0';//null pointer at the end
//printf("A %s\nB %s\n",A_h,B_h);
//setting up input__________ finish
top_h = (int*) malloc (sizeof(int) * (BLOCK_SIZE+1));
left_h = (int*) malloc (sizeof(int) * (BLOCK_SIZE+1));
tab_h = (int*) malloc (sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE));
table = (int*) malloc (sizeof(int) * (A_sz*B_sz));
if (table == NULL) {
printf("table malloc failed\n");
}
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMalloc((void**) &A_d, sizeof(char) * (A_sz+1));
hipMalloc((void**) &B_d, sizeof(char) * (B_sz+1));
hipMalloc((void**) &tab_d, sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE));
hipMalloc((void**) &top_d, sizeof(int) * (BLOCK_SIZE+1));
hipMalloc((void**) &left_d, sizeof(int) * (BLOCK_SIZE+1));
hipMemset(tab_d, 0, sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE));//always
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(A_d, A_h, sizeof(char) * (A_sz+1), hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, sizeof(char) * (B_sz+1), hipMemcpyHostToDevice);
//hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//breaking up the input into tiles/grids
A_block_dim = (A_sz-1)/BLOCK_SIZE +1;
B_block_dim = (B_sz-1)/BLOCK_SIZE +1;
for (A_grid = 0; A_grid < A_block_dim; A_grid++) {
for (B_grid = 0; B_grid < B_block_dim; B_grid++) {
if (A_grid == 0) {
hipMemset(top_d, 0, sizeof(int) * (BLOCK_SIZE+1));
} else {
if (B_grid == 0) {//corner case
top_h[0] = 0;
} else {
top_h[0] = table[(A_grid*BLOCK_SIZE-1)*B_sz + B_grid*BLOCK_SIZE-1];
}
memcpy((top_h+1),(table + (A_grid*BLOCK_SIZE-1)*B_sz + B_grid*BLOCK_SIZE), sizeof(int) * BLOCK_SIZE);
hipMemcpy(top_d, top_h, sizeof(int) * (BLOCK_SIZE+1), hipMemcpyHostToDevice);
}
if (B_grid == 0) {
hipMemset(left_d, 0, sizeof(int) * (BLOCK_SIZE+1));
} else {
if (A_grid == 0) {//corner case
left_h[0] = 0;
} else {
left_h[0] = table[(A_grid*BLOCK_SIZE-1)*B_sz + B_grid*BLOCK_SIZE-1];
}
for (m = 1; m <= BLOCK_SIZE; m++) {
left_h[m] = table[(A_grid*BLOCK_SIZE+m-1)*B_sz + B_grid*BLOCK_SIZE-1];
}
hipMemcpy(left_d, left_h, sizeof(int) * (BLOCK_SIZE+1), hipMemcpyHostToDevice);
}
diag_loop = 2*BLOCK_SIZE;
for (loop = 0;loop <= diag_loop; loop++) {
lcsKernel(A_grid, B_grid, tab_d, A_d, B_d, top_d, left_d, A_sz, B_sz, loop);
}
hipMemcpy(tab_h, tab_d, sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE), hipMemcpyDeviceToHost);
for (k = 0; k < BLOCK_SIZE; k++) {
for (l = 0; l < BLOCK_SIZE; l++) {
if ( ((A_grid*BLOCK_SIZE+k) < A_sz) && ((B_grid*BLOCK_SIZE+l) < B_sz) ) {
table[(A_grid*BLOCK_SIZE+k)*B_sz + (B_grid*BLOCK_SIZE+l)] = tab_h[k*BLOCK_SIZE+l];
}
}
}
}
}
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
//INSERT CODE HERE
//hipDeviceSynchronize();
// Verify correctness -----------------------------------------------------
verify(A_h, B_h, table, A_sz, B_sz);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(tab_h);
free(top_h);
free(left_h);
free(table);
//INSERT CODE HERE
hipFree(A_d);
hipFree(B_d);
hipFree(tab_d);
hipFree(top_d);
hipFree(left_d);
return 0;
}
| 99d9023fcfa4061482a279764aa9abd76586f213.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "kernel.cu"
#include "support.cu"
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
char *A_h, *B_h;
char *A_d, *B_d;
int *tab_h, *tab_d, *table;
int *top_d, *left_d;
int *top_h, *left_h;
size_t A_sz, B_sz, diag_loop;
int loop, i,j, k, l, m;
int A_block_dim, B_block_dim;
int A_grid, B_grid;
unsigned VecSize;
//dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 5120;
A_sz = VecSize;
B_sz = VecSize;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
if (VecSize > 40960) {
VecSize = 40960;
}
A_sz = VecSize;
B_sz = VecSize;
} else if (argc == 3) {
A_sz = atoi(argv[1]);
B_sz = atoi(argv[2]);
} else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
printf("Dimention of:\nA %d\nB %d\n", A_sz, B_sz);
//setting up input__________ start
//set standard seed
srand(217);
//A_sz + 1 for null pointer
A_h = (char*) malloc( sizeof(char)*(A_sz+1));
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%26)+'a';}
//B_sz + 1 for null pointer
B_h = (char*) malloc( sizeof(char)*(B_sz+1) );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%26)+'a';}
//A_h[0] = B_h[0] = 'A';//adding a char out of range as null
A_h[A_sz] = B_h[B_sz] = '\0';//null pointer at the end
//printf("A %s\nB %s\n",A_h,B_h);
//setting up input__________ finish
top_h = (int*) malloc (sizeof(int) * (BLOCK_SIZE+1));
left_h = (int*) malloc (sizeof(int) * (BLOCK_SIZE+1));
tab_h = (int*) malloc (sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE));
table = (int*) malloc (sizeof(int) * (A_sz*B_sz));
if (table == NULL) {
printf("table malloc failed\n");
}
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMalloc((void**) &A_d, sizeof(char) * (A_sz+1));
cudaMalloc((void**) &B_d, sizeof(char) * (B_sz+1));
cudaMalloc((void**) &tab_d, sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE));
cudaMalloc((void**) &top_d, sizeof(int) * (BLOCK_SIZE+1));
cudaMalloc((void**) &left_d, sizeof(int) * (BLOCK_SIZE+1));
cudaMemset(tab_d, 0, sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE));//always
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(A_d, A_h, sizeof(char) * (A_sz+1), cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, sizeof(char) * (B_sz+1), cudaMemcpyHostToDevice);
//cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//breaking up the input into tiles/grids
A_block_dim = (A_sz-1)/BLOCK_SIZE +1;
B_block_dim = (B_sz-1)/BLOCK_SIZE +1;
for (A_grid = 0; A_grid < A_block_dim; A_grid++) {
for (B_grid = 0; B_grid < B_block_dim; B_grid++) {
if (A_grid == 0) {
cudaMemset(top_d, 0, sizeof(int) * (BLOCK_SIZE+1));
} else {
if (B_grid == 0) {//corner case
top_h[0] = 0;
} else {
top_h[0] = table[(A_grid*BLOCK_SIZE-1)*B_sz + B_grid*BLOCK_SIZE-1];
}
memcpy((top_h+1),(table + (A_grid*BLOCK_SIZE-1)*B_sz + B_grid*BLOCK_SIZE), sizeof(int) * BLOCK_SIZE);
cudaMemcpy(top_d, top_h, sizeof(int) * (BLOCK_SIZE+1), cudaMemcpyHostToDevice);
}
if (B_grid == 0) {
cudaMemset(left_d, 0, sizeof(int) * (BLOCK_SIZE+1));
} else {
if (A_grid == 0) {//corner case
left_h[0] = 0;
} else {
left_h[0] = table[(A_grid*BLOCK_SIZE-1)*B_sz + B_grid*BLOCK_SIZE-1];
}
for (m = 1; m <= BLOCK_SIZE; m++) {
left_h[m] = table[(A_grid*BLOCK_SIZE+m-1)*B_sz + B_grid*BLOCK_SIZE-1];
}
cudaMemcpy(left_d, left_h, sizeof(int) * (BLOCK_SIZE+1), cudaMemcpyHostToDevice);
}
diag_loop = 2*BLOCK_SIZE;
for (loop = 0;loop <= diag_loop; loop++) {
lcsKernel(A_grid, B_grid, tab_d, A_d, B_d, top_d, left_d, A_sz, B_sz, loop);
}
cudaMemcpy(tab_h, tab_d, sizeof(int) * (BLOCK_SIZE*BLOCK_SIZE), cudaMemcpyDeviceToHost);
for (k = 0; k < BLOCK_SIZE; k++) {
for (l = 0; l < BLOCK_SIZE; l++) {
if ( ((A_grid*BLOCK_SIZE+k) < A_sz) && ((B_grid*BLOCK_SIZE+l) < B_sz) ) {
table[(A_grid*BLOCK_SIZE+k)*B_sz + (B_grid*BLOCK_SIZE+l)] = tab_h[k*BLOCK_SIZE+l];
}
}
}
}
}
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
//INSERT CODE HERE
//cudaDeviceSynchronize();
// Verify correctness -----------------------------------------------------
verify(A_h, B_h, table, A_sz, B_sz);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(tab_h);
free(top_h);
free(left_h);
free(table);
//INSERT CODE HERE
cudaFree(A_d);
cudaFree(B_d);
cudaFree(tab_d);
cudaFree(top_d);
cudaFree(left_d);
return 0;
}
|
d5deb58dfd6f86ced7aca07fb1dfd21a583d17be.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_grey_and_blur.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *Pout = NULL;
hipMalloc(&Pout, XSIZE*YSIZE);
unsigned char *Pin = NULL;
hipMalloc(&Pin, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gpu_grey_and_blur), dim3(gridBlock),dim3(threadBlock), 0, 0, Pout,Pin,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gpu_grey_and_blur), dim3(gridBlock),dim3(threadBlock), 0, 0, Pout,Pin,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gpu_grey_and_blur), dim3(gridBlock),dim3(threadBlock), 0, 0, Pout,Pin,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d5deb58dfd6f86ced7aca07fb1dfd21a583d17be.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_grey_and_blur.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *Pout = NULL;
cudaMalloc(&Pout, XSIZE*YSIZE);
unsigned char *Pin = NULL;
cudaMalloc(&Pin, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_grey_and_blur<<<gridBlock,threadBlock>>>(Pout,Pin,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_grey_and_blur<<<gridBlock,threadBlock>>>(Pout,Pin,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_grey_and_blur<<<gridBlock,threadBlock>>>(Pout,Pin,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
94013e71245663c63584946a8f59417dab468bde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/channel_shuffle.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include "cudakernel/common/common.h"
template <typename T>
__global__ void ppl_cukernel_channel_shuffle(
int64_t num_elems,
int32_t group,
int32_t channels_per_group,
GArray<DivModFast> input_strides_fast,
const T* input,
T* output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t output_offset = 0;
int n_idx, c_idx, hw_idx, remain = index;
input_strides_fast[0].divmod(remain, n_idx, remain);
output_offset += (index - remain);
input_strides_fast[1].divmod(remain, c_idx, remain);
hw_idx = remain;
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx;
output[output_offset] = input[index];
}
template <typename T>
__global__ void ppl_cukernel_channel_shuffle_nhwc(
int64_t num_elems,
int32_t group,
int channels_per_group,
int pad_channels,
DivModFast channels_fast,
const T *input,
T *output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int nhw_idx, c_idx, remain = index;
channels_fast.divmod(remain, nhw_idx, c_idx);
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
input_offset += nhw_idx * pad_channels + c_idx;
output_offset += nhw_idx * pad_channels + out_c_idx;
output[output_offset] = input[input_offset];
}
ppl::common::RetCode PPLCUDAChannelShuffleForwardImp(
hipStream_t stream,
int group,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
// num_dims must be equal to 4
int num_dims = output_shape->GetDimCount();
int64_t num_elems = output_shape->GetElementsExcludingPadding();
// for ndarray layout
int num_input_strides_dims = num_dims - 2;
GArray<DivModFast> input_strides_fast(num_input_strides_dims);
int elems_hw = input_shape->GetDim(2) * input_shape->GetDim(3);
input_strides_fast[1] = DivModFast(elems_hw);
int elems_chw = input_shape->GetDim(1) * elems_hw;
input_strides_fast[0] = DivModFast(elems_chw);
// for nhwc layout
int pad_channels = input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1);
DivModFast channels_fast(input_shape->GetDim(1));
int block_size = 256;
int grid_size = (num_elems + block_size - 1) / block_size;
int channels_per_group = input_shape->GetDim(1) / group;
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC){ \
hipLaunchKernelGGL(( ppl_cukernel_channel_shuffle_nhwc), dim3(grid_size), dim3(block_size), 0, stream, \
num_elems, group, channels_per_group, pad_channels, channels_fast, \
(const TYPE *)input, (TYPE *)output); \
} else { \
hipLaunchKernelGGL(( ppl_cukernel_channel_shuffle), dim3(grid_size), dim3(block_size), 0, stream, \
num_elems, group, channels_per_group, input_strides_fast, (const TYPE *)input, (TYPE *)output); \
} \
return ppl::common::RC_SUCCESS; \
} \
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
} | 94013e71245663c63584946a8f59417dab468bde.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/channel_shuffle.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include "cudakernel/common/common.h"
template <typename T>
__global__ void ppl_cukernel_channel_shuffle(
int64_t num_elems,
int32_t group,
int32_t channels_per_group,
GArray<DivModFast> input_strides_fast,
const T* input,
T* output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t output_offset = 0;
int n_idx, c_idx, hw_idx, remain = index;
input_strides_fast[0].divmod(remain, n_idx, remain);
output_offset += (index - remain);
input_strides_fast[1].divmod(remain, c_idx, remain);
hw_idx = remain;
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx;
output[output_offset] = input[index];
}
template <typename T>
__global__ void ppl_cukernel_channel_shuffle_nhwc(
int64_t num_elems,
int32_t group,
int channels_per_group,
int pad_channels,
DivModFast channels_fast,
const T *input,
T *output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int nhw_idx, c_idx, remain = index;
channels_fast.divmod(remain, nhw_idx, c_idx);
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
input_offset += nhw_idx * pad_channels + c_idx;
output_offset += nhw_idx * pad_channels + out_c_idx;
output[output_offset] = input[input_offset];
}
ppl::common::RetCode PPLCUDAChannelShuffleForwardImp(
cudaStream_t stream,
int group,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
// num_dims must be equal to 4
int num_dims = output_shape->GetDimCount();
int64_t num_elems = output_shape->GetElementsExcludingPadding();
// for ndarray layout
int num_input_strides_dims = num_dims - 2;
GArray<DivModFast> input_strides_fast(num_input_strides_dims);
int elems_hw = input_shape->GetDim(2) * input_shape->GetDim(3);
input_strides_fast[1] = DivModFast(elems_hw);
int elems_chw = input_shape->GetDim(1) * elems_hw;
input_strides_fast[0] = DivModFast(elems_chw);
// for nhwc layout
int pad_channels = input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1);
DivModFast channels_fast(input_shape->GetDim(1));
int block_size = 256;
int grid_size = (num_elems + block_size - 1) / block_size;
int channels_per_group = input_shape->GetDim(1) / group;
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC){ \
ppl_cukernel_channel_shuffle_nhwc<<<grid_size, block_size, 0, stream>>>( \
num_elems, group, channels_per_group, pad_channels, channels_fast, \
(const TYPE *)input, (TYPE *)output); \
} else { \
ppl_cukernel_channel_shuffle<<<grid_size, block_size, 0, stream>>>( \
num_elems, group, channels_per_group, input_strides_fast, (const TYPE *)input, (TYPE *)output); \
} \
return ppl::common::RC_SUCCESS; \
} \
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
} |
524e781a8ed24d686f65cddc989b32bd8baea942.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file Gaussian.cu
* @details This file describes the functions belonging to Gaussian class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "Gaussian.h"
#include "Gaussian_kernel.cu"
Gaussian::Gaussian(int s)
{
Size = s;
}
Gaussian::~Gaussian()
{
//Free host memory
if(a != NULL) hipHostFree(a);
if(b != NULL) hipHostFree(b);
if(m != NULL) hipHostFree(m);
if(finalVec != NULL) delete [] finalVec;
//Free device memory
if(m_cuda != NULL) hipFree(m_cuda);
if(a_cuda != NULL) hipFree(a_cuda);
if(b_cuda != NULL) hipFree(b_cuda);
}
void Gaussian::allocHostMemory(void)
{
hipHostMalloc((void **)&a, Size * Size * sizeof(float));
hipHostMalloc((void **)&b, Size * Size * sizeof(float));
hipHostMalloc((void **)&m, Size * Size * sizeof(float));
finalVec = new float[Size];
}
void Gaussian::freeHostMemory(void)
{
//Free host memory
if(a != NULL) hipHostFree(a);
if(b != NULL) hipHostFree(b);
if(m != NULL) hipHostFree(m);
if(finalVec != NULL) delete [] finalVec;
}
void Gaussian::allocDeviceMemory(void)
{
hipMalloc((void **) &m_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &a_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &b_cuda, Size * sizeof(float));
}
void Gaussian::freeDeviceMemory(void)
{
if(m_cuda != NULL) hipFree(m_cuda);
if(a_cuda != NULL) hipFree(a_cuda);
if(b_cuda != NULL) hipFree(b_cuda);
}
void Gaussian::generatingData(void)
{
create_matrix(a, Size);
for (int j =0; j< Size; j++)
b[j]=1.0;
for (int i=0; i<Size*Size; i++)
m[i] = 0.0;
}
void Gaussian::memHostToDeviceAsync(hipStream_t stream)
{
hipMemcpyAsync(m_cuda, m, Size * Size * sizeof(float), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(a_cuda, a, Size * Size * sizeof(float), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(b_cuda, b, Size * sizeof(float), hipMemcpyHostToDevice, stream);
}
void Gaussian::memHostToDevice(void)
{
hipMemcpy(m_cuda, m, Size * Size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(a_cuda, a, Size * Size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(b_cuda, b, Size * sizeof(float), hipMemcpyHostToDevice);
}
void Gaussian::memDeviceToHostAsync(hipStream_t stream)
{
hipMemcpyAsync(m, m_cuda, Size * Size * sizeof(float), hipMemcpyDeviceToHost, stream);
hipMemcpyAsync(a, a_cuda, Size * Size * sizeof(float), hipMemcpyDeviceToHost, stream);
hipMemcpyAsync(b, b_cuda, Size * sizeof(float), hipMemcpyDeviceToHost, stream);
}
void Gaussian::memDeviceToHost(void)
{
hipMemcpy(m, m_cuda, Size * Size * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(a, a_cuda, Size * Size * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(b, b_cuda, Size * sizeof(float), hipMemcpyDeviceToHost);
}
void Gaussian::launch_kernel_Async(hipStream_t stream)
{
int block_size,grid_size;
block_size = MAXBLOCKSIZE_GAUSSIAN;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
int blockSize2d, gridSize2d;
blockSize2d = BLOCK_SIZE_XY_GAUSSIAN;
gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
for (int t=0; t<(Size-1); t++) {
hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, stream, m_cuda,a_cuda,Size,t);
hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, stream, m_cuda,a_cuda,b_cuda,Size,Size-t,t);
}
}
void Gaussian::launch_kernel(void)
{
int block_size,grid_size;
block_size = MAXBLOCKSIZE_GAUSSIAN;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
int blockSize2d, gridSize2d;
blockSize2d = BLOCK_SIZE_XY_GAUSSIAN;
gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
for (int t=0; t<(Size-1); t++) {
hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t);
hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t);
}
}
void Gaussian::checkResults(void)
{
BackSub();
}
void Gaussian::getBytesHTD(int *bytes_htd)
{
*bytes_htd = (2*(Size * Size * sizeof(float))) + (Size * sizeof(float));
}
void Gaussian::getBytesDTH(int *bytes_dth)
{
*bytes_dth = (2*(Size * Size * sizeof(float))) + (Size * sizeof(float));
}
void Gaussian::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
void Gaussian::create_matrix(float *m, int size)
{
int i,j;
float lamda = -0.01;
float coe[2*size-1];
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
}
void Gaussian::BackSub(void)
{
// solve "bottom up"
int i,j;
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
| 524e781a8ed24d686f65cddc989b32bd8baea942.cu | /**
* @file Gaussian.cu
* @details This file describes the functions belonging to Gaussian class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "Gaussian.h"
#include "Gaussian_kernel.cu"
Gaussian::Gaussian(int s)
{
Size = s;
}
Gaussian::~Gaussian()
{
//Free host memory
if(a != NULL) cudaFreeHost(a);
if(b != NULL) cudaFreeHost(b);
if(m != NULL) cudaFreeHost(m);
if(finalVec != NULL) delete [] finalVec;
//Free device memory
if(m_cuda != NULL) cudaFree(m_cuda);
if(a_cuda != NULL) cudaFree(a_cuda);
if(b_cuda != NULL) cudaFree(b_cuda);
}
void Gaussian::allocHostMemory(void)
{
cudaMallocHost((void **)&a, Size * Size * sizeof(float));
cudaMallocHost((void **)&b, Size * Size * sizeof(float));
cudaMallocHost((void **)&m, Size * Size * sizeof(float));
finalVec = new float[Size];
}
void Gaussian::freeHostMemory(void)
{
//Free host memory
if(a != NULL) cudaFreeHost(a);
if(b != NULL) cudaFreeHost(b);
if(m != NULL) cudaFreeHost(m);
if(finalVec != NULL) delete [] finalVec;
}
void Gaussian::allocDeviceMemory(void)
{
cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &b_cuda, Size * sizeof(float));
}
void Gaussian::freeDeviceMemory(void)
{
if(m_cuda != NULL) cudaFree(m_cuda);
if(a_cuda != NULL) cudaFree(a_cuda);
if(b_cuda != NULL) cudaFree(b_cuda);
}
void Gaussian::generatingData(void)
{
create_matrix(a, Size);
for (int j =0; j< Size; j++)
b[j]=1.0;
for (int i=0; i<Size*Size; i++)
m[i] = 0.0;
}
void Gaussian::memHostToDeviceAsync(cudaStream_t stream)
{
cudaMemcpyAsync(m_cuda, m, Size * Size * sizeof(float), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(a_cuda, a, Size * Size * sizeof(float), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(b_cuda, b, Size * sizeof(float), cudaMemcpyHostToDevice, stream);
}
void Gaussian::memHostToDevice(void)
{
cudaMemcpy(m_cuda, m, Size * Size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(a_cuda, a, Size * Size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_cuda, b, Size * sizeof(float), cudaMemcpyHostToDevice);
}
/**
 * Asynchronously copies m, a (Size x Size each) and b (Size elements)
 * back from the device in the given stream.
 */
void Gaussian::memDeviceToHostAsync(cudaStream_t stream)
{
const size_t matrixBytes = Size * Size * sizeof(float);
const size_t vectorBytes = Size * sizeof(float);
cudaMemcpyAsync(m, m_cuda, matrixBytes, cudaMemcpyDeviceToHost, stream);
cudaMemcpyAsync(a, a_cuda, matrixBytes, cudaMemcpyDeviceToHost, stream);
cudaMemcpyAsync(b, b_cuda, vectorBytes, cudaMemcpyDeviceToHost, stream);
}
/**
 * Blocking device-to-host transfer of m, a (Size x Size each) and
 * b (Size elements); after this, BackSub() can run on the host data.
 */
void Gaussian::memDeviceToHost(void)
{
const size_t matrixBytes = Size * Size * sizeof(float);
const size_t vectorBytes = Size * sizeof(float);
cudaMemcpy(m, m_cuda, matrixBytes, cudaMemcpyDeviceToHost);
cudaMemcpy(a, a_cuda, matrixBytes, cudaMemcpyDeviceToHost);
cudaMemcpy(b, b_cuda, vectorBytes, cudaMemcpyDeviceToHost);
}
/**
 * Launches the Gaussian-elimination kernels asynchronously in `stream`.
 * For each pivot t: Fan1 computes the multiplier column into m_cuda,
 * then Fan2 updates the trailing submatrix of a_cuda and the vector b_cuda.
 *
 * Idiom fix: the grid sizes are plain ceiling divisions; the original
 * expressions `(Size/bs) + (!(Size%bs?0:1))` computed the same value in a
 * needlessly obscure (accidentally correct) way.
 */
void Gaussian::launch_kernel_Async(cudaStream_t stream)
{
int block_size = MAXBLOCKSIZE_GAUSSIAN;
int grid_size = (Size + block_size - 1) / block_size;        // ceil(Size/block_size)
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
int blockSize2d = BLOCK_SIZE_XY_GAUSSIAN;
int gridSize2d = (Size + blockSize2d - 1) / blockSize2d;     // ceil(Size/blockSize2d)
dim3 dimBlockXY(blockSize2d, blockSize2d);
dim3 dimGridXY(gridSize2d, gridSize2d);
for (int t = 0; t < (Size - 1); t++) {
    Fan1<<<dimGrid,dimBlock, 0, stream>>>(m_cuda,a_cuda,Size,t);
    Fan2<<<dimGridXY,dimBlockXY, 0, stream>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t);
}
}
/**
 * Launches the Gaussian-elimination kernels on the default stream.
 * For each pivot t: Fan1 computes the multiplier column into m_cuda,
 * then Fan2 updates the trailing submatrix of a_cuda and the vector b_cuda.
 *
 * Idiom fix: the grid sizes are plain ceiling divisions; the original
 * expressions `(Size/bs) + (!(Size%bs?0:1))` computed the same value in a
 * needlessly obscure (accidentally correct) way.
 */
void Gaussian::launch_kernel(void)
{
int block_size = MAXBLOCKSIZE_GAUSSIAN;
int grid_size = (Size + block_size - 1) / block_size;        // ceil(Size/block_size)
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
int blockSize2d = BLOCK_SIZE_XY_GAUSSIAN;
int gridSize2d = (Size + blockSize2d - 1) / blockSize2d;     // ceil(Size/blockSize2d)
dim3 dimBlockXY(blockSize2d, blockSize2d);
dim3 dimGridXY(gridSize2d, gridSize2d);
for (int t = 0; t < (Size - 1); t++) {
    Fan1<<<dimGrid,dimBlock>>>(m_cuda,a_cuda,Size,t);
    Fan2<<<dimGridXY,dimBlockXY>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t);
}
}
//Produces the final solution vector on the host via serial back
//substitution; assumes the triangularized system was already copied back
//(memDeviceToHost). Despite the name, no reference comparison is done.
void Gaussian::checkResults(void)
{
BackSub();
}
/**
 * Reports the total byte count transferred host-to-device:
 * two Size x Size matrices (m and a) plus the Size-element vector b.
 * @param bytes_htd out parameter receiving the byte total.
 */
void Gaussian::getBytesHTD(int *bytes_htd)
{
int matrixBytes = Size * Size * sizeof(float);
int vectorBytes = Size * sizeof(float);
*bytes_htd = 2 * matrixBytes + vectorBytes;
}
/**
 * Reports the total byte count transferred device-to-host:
 * two Size x Size matrices (m and a) plus the Size-element vector b.
 * @param bytes_dth out parameter receiving the byte total.
 */
void Gaussian::getBytesDTH(int *bytes_dth)
{
int matrixBytes = Size * Size * sizeof(float);
int vectorBytes = Size * sizeof(float);
*bytes_dth = 2 * matrixBytes + vectorBytes;
}
/**
 * Estimates transfer times with a linear latency + bandwidth model:
 * time = Lo (fixed latency) + bytes * G (cost per byte).
 * Overlapped-transfer estimates are only filled in (non-zero) when the
 * device has two copy engines (asyncEngineCount == 2), i.e. when H2D and
 * DTH transfers can actually run concurrently.
 * @param gpu device ordinal queried for asyncEngineCount.
 * @param estimated_time_HTD/DTH      plain transfer-time estimates (out).
 * @param estimated_overlapped_*      overlapped estimates, 0 if unsupported (out).
 * @param LoHTD/LoDTH                 per-direction latency constants.
 * @param GHTD/GDTH/overlappedG*      per-byte cost constants.
 */
void Gaussian::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
/**
 * Fills m (row major, size x size) with the symmetric test matrix
 * m[i][j] = 10 * exp(lamda * |i - j|): a per-offset profile is tabulated
 * once in `coe` (index size-1+d holds the value for diagonal offset d),
 * then broadcast to every row.
 * @param m    output buffer of size*size floats.
 * @param size matrix dimension.
 */
void Gaussian::create_matrix(float *m, int size)
{
float lamda = -0.01;
float coe[2*size-1];                     // coefficients for offsets -(size-1) .. size-1
for (int i = 0; i < size; i++)
{
    float value = 10*exp(lamda*i);
    coe[size-1+i] = value;               // offset +i
    coe[size-1-i] = value;               // offset -i (same value: profile is symmetric)
}
for (int r = 0; r < size; r++) {
    for (int c = 0; c < size; c++) {
        m[r*size+c] = coe[size-1-r+c];
    }
}
}
/**
 * Serial back substitution: solves the upper-triangular system
 * a * finalVec = b from the last unknown upward, storing the result in
 * finalVec. Uses the host copies of a and b (run after memDeviceToHost).
 */
void Gaussian::BackSub(void)
{
// solve "bottom up"
for (int i = 0; i < Size; i++) {
    int row = Size - i - 1;                       // unknown being solved this pass
    finalVec[row] = b[row];
    for (int j = 0; j < i; j++) {
        int col = Size - j - 1;                   // already-solved unknowns
        finalVec[row] -= a[Size*row + col] * finalVec[col];
    }
    finalVec[row] = finalVec[row] / a[Size*row + row];
}
}
|
fa272ca20d070249b2ff5f69c0cfd988d92ec512.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <torch/library.h>
#include <THH/THHAtomics.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
// Position-sensitive ROI pooling, forward pass (HIP port).
// One thread per pooled output element (grid-stride via CUDA_1D_KERNEL_LOOP):
// it averages the matching position-sensitive input channel over its ROI bin
// and records that channel index in channel_mapping for the backward pass.
template <typename T>
__global__ void ps_roi_pool_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
const T* rois,
int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
// rois are rows of 5: (batch_index, x1, y1, x2, y2)
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
// NOTE(review): forward clamps to height-1/width-1 while the backward
// kernel clamps to height/width — confirm this asymmetry is intentional.
hstart = min(max(hstart + roi_start_h, 0), height - 1);
hend = min(max(hend + roi_start_h, 0), height - 1);
wstart = min(max(wstart + roi_start_w, 0), width - 1);
wend = min(max(wend + roi_start_w, 0), width - 1);
bool is_empty = (hend <= hstart) || (wend <= wstart);
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
// Average the bin; empty bins produce 0.
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
out_sum += offset_input[input_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
output[index] = is_empty ? static_cast<T>(0) : out_sum / bin_area;
channel_mapping[index] = c_in;
}
}
// Position-sensitive ROI pooling, backward pass (HIP port).
// One thread per pooled output element: it scatters grad_output/bin_area
// uniformly over its bin in grad_input, at the channel recorded by the
// forward pass. atomicAdd is required because bins of overlapping ROIs
// (and ROIs sharing a batch image) touch the same grad_input cells.
template <typename T>
__global__ void ps_roi_pool_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* channel_mapping,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Channel this output element pooled from, saved by the forward kernel.
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? static_cast<T>(0) : grad_output[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int grad_input_index = h * width + w;
atomicAdd(grad_input_offset + grad_input_index, diff_val);
}
}
}
}
// Host wrapper for the PSROIPool forward pass (HIP): validates the inputs,
// allocates the output and channel-mapping tensors, and launches the kernel
// on the current stream. Returns (pooled output, channel_mapping).
std::tuple<at::Tensor, at::Tensor> ps_roi_pool_forward_kernel(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ps_roi_pool_forward_kernel";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Position-sensitive pooling needs one input channel per output bin.
TORCH_CHECK(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
// Nothing to pool (e.g. zero ROIs): skip the launch entirely.
if (output_size == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, channel_mapping);
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Grid capped at 4096 blocks of 512 threads; the kernel's grid-stride
// loop covers any remaining elements.
dim3 grid(::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "ps_roi_pool_forward_kernel", [&] {
hipLaunchKernelGGL(( ps_roi_pool_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
channels_out,
output.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>());
});
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, channel_mapping);
}
// Host wrapper for the PSROIPool backward pass (HIP): validates the inputs,
// allocates a zero-filled grad_input of the original input shape, and
// launches the scatter kernel on the current stream.
at::Tensor ps_roi_pool_backward_kernel(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "ps_roi_pool_backward_kernel";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Grid capped at 4096 blocks of 512 threads; the kernel's grid-stride
// loop covers any remaining elements.
dim3 grid(::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "ps_roi_pool_backward_kernel", [&] {
hipLaunchKernelGGL(( ps_roi_pool_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad_.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace
// Registers the GPU (HIP, masquerading as CUDA) implementations of the
// ps_roi_pool forward and backward ops with the torchvision dispatcher.
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::ps_roi_pool"),
TORCH_FN(ps_roi_pool_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_ps_roi_pool_backward"),
TORCH_FN(ps_roi_pool_backward_kernel));
}
} // namespace ops
} // namespace vision
| fa272ca20d070249b2ff5f69c0cfd988d92ec512.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/library.h>
#include <THC/THCAtomics.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
// Position-sensitive ROI pooling, forward pass (CUDA).
// One thread per pooled output element (grid-stride via CUDA_1D_KERNEL_LOOP):
// it averages the matching position-sensitive input channel over its ROI bin
// and records that channel index in channel_mapping for the backward pass.
template <typename T>
__global__ void ps_roi_pool_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
const T* rois,
int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
// rois are rows of 5: (batch_index, x1, y1, x2, y2)
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
// NOTE(review): forward clamps to height-1/width-1 while the backward
// kernel clamps to height/width — confirm this asymmetry is intentional.
hstart = min(max(hstart + roi_start_h, 0), height - 1);
hend = min(max(hend + roi_start_h, 0), height - 1);
wstart = min(max(wstart + roi_start_w, 0), width - 1);
wend = min(max(wend + roi_start_w, 0), width - 1);
bool is_empty = (hend <= hstart) || (wend <= wstart);
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
// Average the bin; empty bins produce 0.
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
out_sum += offset_input[input_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
output[index] = is_empty ? static_cast<T>(0) : out_sum / bin_area;
channel_mapping[index] = c_in;
}
}
// Position-sensitive ROI pooling, backward pass (CUDA).
// One thread per pooled output element: it scatters grad_output/bin_area
// uniformly over its bin in grad_input, at the channel recorded by the
// forward pass. atomicAdd is required because bins of overlapping ROIs
// (and ROIs sharing a batch image) touch the same grad_input cells.
template <typename T>
__global__ void ps_roi_pool_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* channel_mapping,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Channel this output element pooled from, saved by the forward kernel.
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? static_cast<T>(0) : grad_output[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int grad_input_index = h * width + w;
atomicAdd(grad_input_offset + grad_input_index, diff_val);
}
}
}
}
// Host wrapper for the PSROIPool forward pass (CUDA): validates the inputs,
// allocates the output and channel-mapping tensors, and launches the kernel
// on the current stream. Returns (pooled output, channel_mapping).
std::tuple<at::Tensor, at::Tensor> ps_roi_pool_forward_kernel(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ps_roi_pool_forward_kernel";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Position-sensitive pooling needs one input channel per output bin.
TORCH_CHECK(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
// Nothing to pool (e.g. zero ROIs): skip the launch entirely.
if (output_size == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Grid capped at 4096 blocks of 512 threads; the kernel's grid-stride
// loop covers any remaining elements.
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "ps_roi_pool_forward_kernel", [&] {
ps_roi_pool_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
channels_out,
output.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
// Host wrapper for the PSROIPool backward pass (CUDA): validates the inputs,
// allocates a zero-filled grad_input of the original input shape, and
// launches the scatter kernel on the current stream.
at::Tensor ps_roi_pool_backward_kernel(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "ps_roi_pool_backward_kernel";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Grid capped at 4096 blocks of 512 threads; the kernel's grid-stride
// loop covers any remaining elements.
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "ps_roi_pool_backward_kernel", [&] {
ps_roi_pool_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad_.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace
// Registers the CUDA implementations of the ps_roi_pool forward and
// backward ops with the torchvision dispatcher.
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::ps_roi_pool"),
TORCH_FN(ps_roi_pool_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_ps_roi_pool_backward"),
TORCH_FN(ps_roi_pool_backward_kernel));
}
} // namespace ops
} // namespace vision
|
389328d2025bcda6aa9416860273ffe34b18a1b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define NUM_BLOCK (128*1024)
#define NUM_T_IN_B 1024
#define ARRAY_SIZE (NUM_T_IN_B*NUM_BLOCK)
#define NUM_STREAMS 2
/*
 * Demo: runs `myKernel` once on the default stream and once split across
 * NUM_STREAMS streams so the H2D copy, kernel, and D2H copy of different
 * chunks overlap. Single-stream results land in `out`, multi-stream
 * results in `out2`.
 *
 * Fixes relative to the original:
 *  - `out2` was used but never declared or allocated;
 *  - the `LOOP_I(...)` invocations relied on an undefined macro and carried
 *    a stray ';', leaving the loop variable `i` out of scope — replaced
 *    with explicit for loops.
 * NOTE(review): `myKernel` must be defined elsewhere in the build.
 */
int main(void)
{
int *in = NULL, *out = NULL, *out2 = NULL, *dIn = NULL, *dOut = NULL;
// Pinned host buffers so the async copies below can actually overlap.
hipHostMalloc(&in, sizeof(int)*ARRAY_SIZE); memset(in, 0, sizeof(int)*ARRAY_SIZE);
hipHostMalloc(&out, sizeof(int)*ARRAY_SIZE); memset(out, 0, sizeof(int)*ARRAY_SIZE);
hipHostMalloc(&out2, sizeof(int)*ARRAY_SIZE); memset(out2, 0, sizeof(int)*ARRAY_SIZE);
hipMalloc(&dIn, sizeof(int)*ARRAY_SIZE);
hipMalloc(&dOut, sizeof(int)*ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; i++)
    in[i] = rand() % 10;
// Single stream version
hipMemcpy(dIn, in, sizeof(int)*ARRAY_SIZE, hipMemcpyHostToDevice);
hipLaunchKernelGGL(myKernel, dim3(NUM_BLOCK), dim3(NUM_T_IN_B), 0, 0, dIn, dOut);
hipMemcpy(out, dOut, sizeof(int)*ARRAY_SIZE, hipMemcpyDeviceToHost);
// Multi-stream version: each stream processes one contiguous chunk.
hipStream_t stream[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++)
    hipStreamCreate(&stream[i]);
int chunkSize = ARRAY_SIZE / NUM_STREAMS;
for (int i = 0; i < NUM_STREAMS; i++)
{
    int offset = chunkSize * i;
    hipMemcpyAsync(dIn + offset, in + offset,
                   sizeof(int)*chunkSize, hipMemcpyHostToDevice, stream[i]);
    hipLaunchKernelGGL(myKernel, dim3(NUM_BLOCK / NUM_STREAMS), dim3(NUM_T_IN_B), 0, stream[i],
                       dIn + offset, dOut + offset);
    hipMemcpyAsync(out2 + offset, dOut + offset,
                   sizeof(int)*chunkSize, hipMemcpyDeviceToHost, stream[i]);
}
hipDeviceSynchronize();   // wait for every stream before tearing down
for (int i = 0; i < NUM_STREAMS; i++) hipStreamDestroy(stream[i]);
hipFree(dIn); hipFree(dOut);
hipHostFree(in); hipHostFree(out); hipHostFree(out2);
return 0;
}
| 389328d2025bcda6aa9416860273ffe34b18a1b5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define NUM_BLOCK (128*1024)
#define NUM_T_IN_B 1024
#define ARRAY_SIZE (NUM_T_IN_B*NUM_BLOCK)
#define NUM_STREAMS 2
/*
 * Demo: runs `myKernel` once on the default stream and once split across
 * NUM_STREAMS streams so the H2D copy, kernel, and D2H copy of different
 * chunks overlap. Single-stream results land in `out`, multi-stream
 * results in `out2`.
 *
 * Fixes relative to the original:
 *  - `out2` was used but never declared or allocated;
 *  - the `LOOP_I(...)` invocations relied on an undefined macro and carried
 *    a stray ';', leaving the loop variable `i` out of scope — replaced
 *    with explicit for loops.
 * NOTE(review): `myKernel` must be defined elsewhere in the build.
 */
int main(void)
{
int *in = NULL, *out = NULL, *out2 = NULL, *dIn = NULL, *dOut = NULL;
// Pinned host buffers so the async copies below can actually overlap.
cudaMallocHost(&in, sizeof(int)*ARRAY_SIZE); memset(in, 0, sizeof(int)*ARRAY_SIZE);
cudaMallocHost(&out, sizeof(int)*ARRAY_SIZE); memset(out, 0, sizeof(int)*ARRAY_SIZE);
cudaMallocHost(&out2, sizeof(int)*ARRAY_SIZE); memset(out2, 0, sizeof(int)*ARRAY_SIZE);
cudaMalloc(&dIn, sizeof(int)*ARRAY_SIZE);
cudaMalloc(&dOut, sizeof(int)*ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; i++)
    in[i] = rand() % 10;
// Single stream version
cudaMemcpy(dIn, in, sizeof(int)*ARRAY_SIZE, cudaMemcpyHostToDevice);
myKernel<<<NUM_BLOCK, NUM_T_IN_B>>>(dIn, dOut);
cudaMemcpy(out, dOut, sizeof(int)*ARRAY_SIZE, cudaMemcpyDeviceToHost);
// Multi-stream version: each stream processes one contiguous chunk.
cudaStream_t stream[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++)
    cudaStreamCreate(&stream[i]);
int chunkSize = ARRAY_SIZE / NUM_STREAMS;
for (int i = 0; i < NUM_STREAMS; i++)
{
    int offset = chunkSize * i;
    cudaMemcpyAsync(dIn + offset, in + offset,
                    sizeof(int)*chunkSize, cudaMemcpyHostToDevice, stream[i]);
    myKernel<<<NUM_BLOCK / NUM_STREAMS, NUM_T_IN_B, 0, stream[i]>>>
        (dIn + offset, dOut + offset);
    cudaMemcpyAsync(out2 + offset, dOut + offset,
                    sizeof(int)*chunkSize, cudaMemcpyDeviceToHost, stream[i]);
}
cudaDeviceSynchronize();   // wait for every stream before tearing down
for (int i = 0; i < NUM_STREAMS; i++) cudaStreamDestroy(stream[i]);
cudaFree(dIn); cudaFree(dOut);
cudaFreeHost(in); cudaFreeHost(out); cudaFreeHost(out2);
return 0;
}
|
1a5049a4a0b0e75cb58465891c23b36753e22065.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include "helper_functions.h"
#include <stdio.h>
#include "kernel_hip.cuh"
/**
 * Per-pixel pass over a pair of images R (reference) and T (target):
 * phase 1 crops the 1-pixel border off the (height+2)x(width+2) inputs and
 * computes central-difference gradients; phase 2 assembles, for each
 * interior cell, the 16 bicubic interpolation coefficients of T by
 * multiplying the 16-vector of cell samples with the 16x16 bicubic matrix.
 * All outputs are height x width (d_OutputdtBicubic holds 4x4 coefficients
 * per cell).
 *
 * Fix: the original `else` branch (zero fill for boundary cells) also
 * executed for threads with row >= height or col >= width — the grid is
 * rounded up, so those threads wrote past the end of d_OutputdtBicubic.
 * The zero fill is now guarded to in-range pixels only.
 *
 * NOTE(review): phase 2 reads d_OutputIMGT* values at (row+1, col+1) that
 * may be produced by threads of a *different* block; __syncthreads() only
 * synchronizes within a block, so with a multi-block grid this is an
 * inter-block race — consider splitting the two phases into separate
 * kernel launches (or recomputing the neighbour samples from d_InputIMGT).
 */
__global__ void RGradient_kernel(const double *d_InputIMGR, const double *d_InputIMGT, const double* __restrict__ d_InputBiubicMatrix,
double *d_OutputIMGR, double *d_OutputIMGT,
double *d_OutputIMGRx, double *d_OutputIMGRy,
double *d_OutputIMGTx, double *d_OutputIMGTy, double *d_OutputIMGTxy, double *d_OutputdtBicubic,
int width, int height)
{
//The size of input images
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//Temp arrays
double d_TaoT[16];
double d_AlphaT[16];
//Phase 1: crop border, central-difference gradients (in-range pixels only).
if((row < height) && (col < width)){
d_OutputIMGR[row*width+col] = d_InputIMGR[(row+1)*(width+2)+col+1];
d_OutputIMGRx[row*width+col] = 0.5 * (d_InputIMGR[(row+1)*(width+2)+col+2] - d_InputIMGR[(row+1)*(width+2)+col]);
d_OutputIMGRy[row*width+col] = 0.5 * (d_InputIMGR[(row+2)*(width+2)+col+1] - d_InputIMGR[(row)*(width+2)+col+1]);
d_OutputIMGT[row*width+col] = d_InputIMGT[(row+1)*(width+2)+col+1];
d_OutputIMGTx[row*width+col] = 0.5 * (d_InputIMGT[(row+1)*(width+2)+col+2] -d_InputIMGT[(row+1)*(width+2)+col]);
d_OutputIMGTy[row*width+col] = 0.5 * (d_InputIMGT[(row+2)*(width+2)+col+1] - d_InputIMGT[(row)*(width+2)+col+1]);
d_OutputIMGTxy[row*width+col]= 0.25 * (d_InputIMGT[(row+2)*(width+2)+col+2] - d_InputIMGT[(row)*(width+2)+col+2] -d_InputIMGT[(row+2)*(width+2)+col] + d_InputIMGT[(row)*(width+2)+col]);
}
__syncthreads();
//Phase 2: bicubic coefficients for interior cells; boundary cells get zeros.
if((row < height-1) && (col < width-1)){
//Gather the 4 corner values/gradients of the cell (row,col)..(row+1,col+1).
d_TaoT[0] = d_OutputIMGT[row*(width)+col];
d_TaoT[1] = d_OutputIMGT[row*(width)+col+1];
d_TaoT[2] = d_OutputIMGT[(row+1)*(width)+col];
d_TaoT[3] = d_OutputIMGT[(row+1)*(width)+col+1];
d_TaoT[4] = d_OutputIMGTx[row*(width)+col];
d_TaoT[5] = d_OutputIMGTx[row*(width)+col+1];
d_TaoT[6] = d_OutputIMGTx[(row+1)*(width)+col];
d_TaoT[7] = d_OutputIMGTx[(row+1)*(width)+col+1];
d_TaoT[8] = d_OutputIMGTy[row*(width)+col];
d_TaoT[9] = d_OutputIMGTy[row*(width)+col+1];
d_TaoT[10] = d_OutputIMGTy[(row+1)*(width)+col];
d_TaoT[11] = d_OutputIMGTy[(row+1)*(width)+col+1];
d_TaoT[12] = d_OutputIMGTxy[row*(width)+col];
d_TaoT[13] = d_OutputIMGTxy[row*(width)+col+1];
d_TaoT[14] = d_OutputIMGTxy[(row+1)*(width)+col];
d_TaoT[15] = d_OutputIMGTxy[(row+1)*(width)+col+1];
//16x16 matrix * 16-vector product.
for(int k=0; k<16; k++){
d_AlphaT[k] = 0.0;
for(int l=0; l<16; l++){
d_AlphaT[k] += (d_InputBiubicMatrix[k*16+l] * d_TaoT[l]);
}
}
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = d_AlphaT[0];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = d_AlphaT[1];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = d_AlphaT[2];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = d_AlphaT[3];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = d_AlphaT[4];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = d_AlphaT[5];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = d_AlphaT[6];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = d_AlphaT[7];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = d_AlphaT[8];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = d_AlphaT[9];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = d_AlphaT[10];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = d_AlphaT[11];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = d_AlphaT[12];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = d_AlphaT[13];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = d_AlphaT[14];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = d_AlphaT[15];
}
else if ((row < height) && (col < width)) {
//Boundary cells (last row/column): zero coefficients. The in-range guard
//above is the fix — out-of-range threads previously wrote out of bounds.
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = 0;
}
}
void launch_kernel(const double *h_InputIMGR, const double *h_InputIMGT,
double *h_OutputIMGR, double *h_OutputIMGT,
double *h_OutputIMGRx, double *h_OutputIMGRy,
double *h_OutputIMGTx, double *h_OutputIMGTy, double *h_OutputIMGTxy, double *h_OutputdTBicubic,
int width, int height)
{
float total_time, compute_time;
StopWatchWin total, compute;
double *d_InputIMGR, *d_InputIMGT, *d_InputBiubicMatrix;
double *d_OutputIMGR, *d_OutputIMGT, *d_OutputIMGRx, *d_OutputIMGRy, *d_OutputIMGTx, *d_OutputIMGTy, *d_OutputIMGTxy;
double *d_OutputdTBicubic;
const static double h_InputBicubicMatrix[16*16] = {
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,
-3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, -2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ,
0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, -2, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0 ,
-3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 0, 0, 0, 0,
0, 0, 0, 0, -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0,
9, -9, -9, 9, 6, 3, -6, -3, 6, -6, 3, -3, 4, 2, 2, 1 ,
-6, 6, 6, -6, -3, -3, 3, 3, -4, 4, -2, 2, -2, -2, -1, -1,
2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0 ,
0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0 ,
-6, 6, 6, -6, -4, -2, 4, 2, -3, 3, -3, 3, -2, -1, -2, -1,
4, -4, -4, 4, 2, 2, -2, -2, 2, -2, 2, -2, 1, 1, 1, 1
};
total.start();
checkCudaErrors(hipMalloc((void**)&d_InputIMGR, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_InputIMGT, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_InputBiubicMatrix, 16*16*sizeof(double)));
checkCudaErrors(hipMemcpy(d_InputIMGR,h_InputIMGR,(width+2)*(height+2)*sizeof(double),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_InputIMGT,h_InputIMGT,(width+2)*(height+2)*sizeof(double),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_InputBiubicMatrix,h_InputBicubicMatrix,16*16*sizeof(double),hipMemcpyHostToDevice));
checkCudaErrors(hipHostMalloc((void**)&h_OutputIMGR,width*height*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**)&h_OutputIMGT,width*height*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**)&h_OutputIMGRx,width*height*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**)&h_OutputIMGRy,width*height*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**)&h_OutputIMGTx,width*height*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**)&h_OutputIMGTy,width*height*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**)&h_OutputIMGTxy,width*height*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**)&h_OutputdTBicubic,width*height*4*4*sizeof(double),hipHostMallocMapped));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputIMGR,h_OutputIMGR,0));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputIMGT,h_OutputIMGT,0));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputIMGRx,h_OutputIMGRx,0));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputIMGRy,h_OutputIMGRy,0));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputIMGTx,h_OutputIMGTx,0));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputIMGTy,h_OutputIMGTy,0));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputIMGTxy,h_OutputIMGTxy,0));
checkCudaErrors(hipHostGetDevicePointer(&d_OutputdTBicubic,h_OutputdTBicubic,0));
dim3 dimB(BLOCK_WIDTH,BLOCK_WIDTH,1);
dim3 dimG((width-1)/BLOCK_WIDTH+1,(height-1)/BLOCK_WIDTH+1,1);
compute.start();
hipLaunchKernelGGL(( RGradient_kernel), dim3(dimG), dim3(dimB), 0, 0, d_InputIMGR,d_InputIMGT,d_InputBiubicMatrix,
d_OutputIMGR, d_OutputIMGT,
d_OutputIMGRx, d_OutputIMGRy,
d_OutputIMGTx, d_OutputIMGTy, d_OutputIMGTxy,d_OutputdTBicubic,
width, height);
hipDeviceSynchronize();
compute.stop();
compute_time = compute.getTime();
/*checkCudaErrors(hipMemcpy(h_OutputIMGR,d_OutputIMGR,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGT,d_OutputIMGT,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGRx,d_OutputIMGRx,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGRy,d_OutputIMGRy,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGTx,d_OutputIMGTx,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGTy,d_OutputIMGTy,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGTxy,d_OutputIMGTxy,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputdTBicubic,d_OutputdTBicubic,width*height*4*4*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_OutputIMGR));
checkCudaErrors(hipFree(d_OutputIMGT));
checkCudaErrors(hipFree(d_OutputIMGRx));
checkCudaErrors(hipFree(d_OutputIMGRy));
checkCudaErrors(hipFree(d_OutputIMGTx));
checkCudaErrors(hipFree(d_OutputIMGTy));
checkCudaErrors(hipFree(d_OutputIMGTxy));
checkCudaErrors(hipFree(d_OutputdTBicubic));*/
total.stop();
total_time = total.getTime();
printf("\nTotal time: %f\n",total_time);
printf("Compute time: %f\n",compute_time);
}
void freeing(void* pointer)
{
hipHostFree(pointer);
}
| 1a5049a4a0b0e75cb58465891c23b36753e22065.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include "helper_functions.h"
#include <stdio.h>
#include "kernel.cuh"
__global__ void RGradient_kernel(const double *d_InputIMGR, const double *d_InputIMGT, const double* __restrict__ d_InputBiubicMatrix,
double *d_OutputIMGR, double *d_OutputIMGT,
double *d_OutputIMGRx, double *d_OutputIMGRy,
double *d_OutputIMGTx, double *d_OutputIMGTy, double *d_OutputIMGTxy, double *d_OutputdtBicubic,
int width, int height)
{
//The size of input images
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//Temp arrays
double d_TaoT[16];
double d_AlphaT[16];
//The rows and cols of output matrix.
if((row < height) && (col < width)){
d_OutputIMGR[row*width+col] = d_InputIMGR[(row+1)*(width+2)+col+1];
d_OutputIMGRx[row*width+col] = 0.5 * (d_InputIMGR[(row+1)*(width+2)+col+2] - d_InputIMGR[(row+1)*(width+2)+col]);
d_OutputIMGRy[row*width+col] = 0.5 * (d_InputIMGR[(row+2)*(width+2)+col+1] - d_InputIMGR[(row)*(width+2)+col+1]);
d_OutputIMGT[row*width+col] = d_InputIMGT[(row+1)*(width+2)+col+1];
d_OutputIMGTx[row*width+col] = 0.5 * (d_InputIMGT[(row+1)*(width+2)+col+2] -d_InputIMGT[(row+1)*(width+2)+col]);
d_OutputIMGTy[row*width+col] = 0.5 * (d_InputIMGT[(row+2)*(width+2)+col+1] - d_InputIMGT[(row)*(width+2)+col+1]);
d_OutputIMGTxy[row*width+col]= 0.25 * (d_InputIMGT[(row+2)*(width+2)+col+2] - d_InputIMGT[(row)*(width+2)+col+2] -d_InputIMGT[(row+2)*(width+2)+col] + d_InputIMGT[(row)*(width+2)+col]);
}
__syncthreads();
if((row < height-1) && (col < width-1)){
d_TaoT[0] = d_OutputIMGT[row*(width)+col];
d_TaoT[1] = d_OutputIMGT[row*(width)+col+1];
d_TaoT[2] = d_OutputIMGT[(row+1)*(width)+col];
d_TaoT[3] = d_OutputIMGT[(row+1)*(width)+col+1];
d_TaoT[4] = d_OutputIMGTx[row*(width)+col];
d_TaoT[5] = d_OutputIMGTx[row*(width)+col+1];
d_TaoT[6] = d_OutputIMGTx[(row+1)*(width)+col];
d_TaoT[7] = d_OutputIMGTx[(row+1)*(width)+col+1];
d_TaoT[8] = d_OutputIMGTy[row*(width)+col];
d_TaoT[9] = d_OutputIMGTy[row*(width)+col+1];
d_TaoT[10] = d_OutputIMGTy[(row+1)*(width)+col];
d_TaoT[11] = d_OutputIMGTy[(row+1)*(width)+col+1];
d_TaoT[12] = d_OutputIMGTxy[row*(width)+col];
d_TaoT[13] = d_OutputIMGTxy[row*(width)+col+1];
d_TaoT[14] = d_OutputIMGTxy[(row+1)*(width)+col];
d_TaoT[15] = d_OutputIMGTxy[(row+1)*(width)+col+1];
for(int k=0; k<16; k++){
d_AlphaT[k] = 0.0;
for(int l=0; l<16; l++){
d_AlphaT[k] += (d_InputBiubicMatrix[k*16+l] * d_TaoT[l]);
}
}
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = d_AlphaT[0];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = d_AlphaT[1];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = d_AlphaT[2];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = d_AlphaT[3];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = d_AlphaT[4];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = d_AlphaT[5];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = d_AlphaT[6];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = d_AlphaT[7];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = d_AlphaT[8];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = d_AlphaT[9];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = d_AlphaT[10];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = d_AlphaT[11];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = d_AlphaT[12];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = d_AlphaT[13];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = d_AlphaT[14];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = d_AlphaT[15];
}
else {
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = 0;
}
}
void launch_kernel(const double *h_InputIMGR, const double *h_InputIMGT,
double *h_OutputIMGR, double *h_OutputIMGT,
double *h_OutputIMGRx, double *h_OutputIMGRy,
double *h_OutputIMGTx, double *h_OutputIMGTy, double *h_OutputIMGTxy, double *h_OutputdTBicubic,
int width, int height)
{
float total_time, compute_time;
StopWatchWin total, compute;
double *d_InputIMGR, *d_InputIMGT, *d_InputBiubicMatrix;
double *d_OutputIMGR, *d_OutputIMGT, *d_OutputIMGRx, *d_OutputIMGRy, *d_OutputIMGTx, *d_OutputIMGTy, *d_OutputIMGTxy;
double *d_OutputdTBicubic;
const static double h_InputBicubicMatrix[16*16] = {
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,
-3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, -2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ,
0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, -2, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0 ,
-3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 0, 0, 0, 0,
0, 0, 0, 0, -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0,
9, -9, -9, 9, 6, 3, -6, -3, 6, -6, 3, -3, 4, 2, 2, 1 ,
-6, 6, 6, -6, -3, -3, 3, 3, -4, 4, -2, 2, -2, -2, -1, -1,
2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0 ,
0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0 ,
-6, 6, 6, -6, -4, -2, 4, 2, -3, 3, -3, 3, -2, -1, -2, -1,
4, -4, -4, 4, 2, 2, -2, -2, 2, -2, 2, -2, 1, 1, 1, 1
};
total.start();
checkCudaErrors(cudaMalloc((void**)&d_InputIMGR, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_InputIMGT, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_InputBiubicMatrix, 16*16*sizeof(double)));
checkCudaErrors(cudaMemcpy(d_InputIMGR,h_InputIMGR,(width+2)*(height+2)*sizeof(double),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_InputIMGT,h_InputIMGT,(width+2)*(height+2)*sizeof(double),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_InputBiubicMatrix,h_InputBicubicMatrix,16*16*sizeof(double),cudaMemcpyHostToDevice));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputIMGR,width*height*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputIMGT,width*height*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputIMGRx,width*height*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputIMGRy,width*height*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputIMGTx,width*height*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputIMGTy,width*height*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputIMGTxy,width*height*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**)&h_OutputdTBicubic,width*height*4*4*sizeof(double),cudaHostAllocMapped));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputIMGR,h_OutputIMGR,0));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputIMGT,h_OutputIMGT,0));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputIMGRx,h_OutputIMGRx,0));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputIMGRy,h_OutputIMGRy,0));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputIMGTx,h_OutputIMGTx,0));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputIMGTy,h_OutputIMGTy,0));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputIMGTxy,h_OutputIMGTxy,0));
checkCudaErrors(cudaHostGetDevicePointer(&d_OutputdTBicubic,h_OutputdTBicubic,0));
dim3 dimB(BLOCK_WIDTH,BLOCK_WIDTH,1);
dim3 dimG((width-1)/BLOCK_WIDTH+1,(height-1)/BLOCK_WIDTH+1,1);
compute.start();
RGradient_kernel<<<dimG, dimB>>>(d_InputIMGR,d_InputIMGT,d_InputBiubicMatrix,
d_OutputIMGR, d_OutputIMGT,
d_OutputIMGRx, d_OutputIMGRy,
d_OutputIMGTx, d_OutputIMGTy, d_OutputIMGTxy,d_OutputdTBicubic,
width, height);
cudaThreadSynchronize();
compute.stop();
compute_time = compute.getTime();
/*checkCudaErrors(cudaMemcpy(h_OutputIMGR,d_OutputIMGR,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGT,d_OutputIMGT,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGRx,d_OutputIMGRx,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGRy,d_OutputIMGRy,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGTx,d_OutputIMGTx,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGTy,d_OutputIMGTy,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGTxy,d_OutputIMGTxy,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputdTBicubic,d_OutputdTBicubic,width*height*4*4*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_OutputIMGR));
checkCudaErrors(cudaFree(d_OutputIMGT));
checkCudaErrors(cudaFree(d_OutputIMGRx));
checkCudaErrors(cudaFree(d_OutputIMGRy));
checkCudaErrors(cudaFree(d_OutputIMGTx));
checkCudaErrors(cudaFree(d_OutputIMGTy));
checkCudaErrors(cudaFree(d_OutputIMGTxy));
checkCudaErrors(cudaFree(d_OutputdTBicubic));*/
total.stop();
total_time = total.getTime();
printf("\nTotal time: %f\n",total_time);
printf("Compute time: %f\n",compute_time);
}
void freeing(void* pointer)
{
cudaFreeHost(pointer);
}
|
aa80fed0aab33bdae741d03bc7601d9f9be8b871.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;
/**
* @brief This functor handles replacing strings by applying the compiled regex patterns
* and inserting the corresponding new string within the matched range of characters.
*
* The logic includes computing the size of each string and also writing the output.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex
* pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory. Shorter patterns are common in data cleaning.
*/
template <size_t stack_size>
struct replace_multi_regex_fn {
column_device_view const d_strings;
reprog_device* progs; // array of regex progs
size_type number_of_patterns;
found_range* d_found_ranges; // working array matched (begin,end) values
column_device_view const d_repls; // replacment strings
int32_t* d_offsets{}; // these are null when
char* d_chars{}; // only computing size
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
u_char data1[stack_size];
u_char data2[stack_size];
auto const d_str = d_strings.element<string_view>(idx);
auto const nchars = d_str.length(); // number of characters in input string
auto nbytes = d_str.size_bytes(); // number of bytes in input string
auto in_ptr = d_str.data(); // input pointer
auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
size_type lpos = 0;
size_type ch_pos = 0;
// initialize the working ranges memory to -1's
thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
// process string one character at a time
while (ch_pos < nchars) {
// this minimizes the regex-find calls by only calling it for stale patterns
// -- those that have not previously matched up to this point (ch_pos)
for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
if (d_ranges[ptn_idx].first >= ch_pos) // previously matched here
continue; // or later in the string
reprog_device prog = progs[ptn_idx];
prog.set_stack_mem(data1, data2);
auto begin = static_cast<int32_t>(ch_pos);
auto end = static_cast<int32_t>(nchars);
if (!prog.is_empty() && prog.find(idx, d_str, begin, end) > 0)
d_ranges[ptn_idx] = found_range{begin, end}; // found a match
else
d_ranges[ptn_idx] = found_range{nchars, nchars}; // this pattern is done
}
// all the ranges have been updated from each regex match;
// look for any that match at this character position (ch_pos)
auto itr =
thrust::find_if(thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos](auto range) {
return range.first == ch_pos;
});
if (itr != d_ranges + number_of_patterns) {
// match found, compute and replace the string in the output
size_type ptn_idx = static_cast<size_type>(itr - d_ranges);
size_type begin = d_ranges[ptn_idx].first;
size_type end = d_ranges[ptn_idx].second;
string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
: d_repls.element<string_view>(0);
auto spos = d_str.byte_offset(begin);
auto epos = d_str.byte_offset(end);
nbytes += d_repl.size_bytes() - (epos - spos);
if (out_ptr) { // copy unmodified content plus new replacement string
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = epos;
}
ch_pos = end - 1;
}
++ch_pos;
}
if (out_ptr) // copy the remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = static_cast<int32_t>(nbytes);
}
};
} // namespace
std::unique_ptr<column> replace_re(
strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
if (patterns.empty()) // no patterns; just return a copy
return std::make_unique<column>(strings.parent(), stream, mr);
CUDF_EXPECTS(!repls.has_nulls(), "Parameter repls must not have any nulls");
auto d_strings = column_device_view::create(strings.parent(), stream);
auto d_repls = column_device_view::create(repls.parent(), stream);
auto d_flags = get_character_flags_table();
// compile regexes into device objects
size_type regex_insts = 0;
std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
thrust::host_vector<reprog_device> progs;
for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
auto prog = reprog_device::create(*itr, d_flags, strings_count, stream);
regex_insts = ::max(regex_insts, prog->insts_counts());
progs.push_back(*prog);
h_progs.emplace_back(std::move(prog));
}
// copy all the reprog_device instances to a device memory array
rmm::device_buffer progs_buffer{sizeof(reprog_device) * progs.size()};
CUDA_TRY(hipMemcpyAsync(progs_buffer.data(),
progs.data(),
progs.size() * sizeof(reprog_device),
hipMemcpyHostToDevice,
stream.value()));
reprog_device* d_progs = reinterpret_cast<reprog_device*>(progs_buffer.data());
// create working buffer for ranges pairs
rmm::device_uvector<found_range> found_ranges(patterns.size() * strings_count, stream);
auto d_found_ranges = found_ranges.data();
// create child columns
// std::pair<std::unique_ptr<column>, std::unique_ptr<column>> children(nullptr, nullptr);
auto children = [&] {
// Each invocation is predicated on the stack size which is dependent on the number of regex
// instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS))
return make_strings_children(
replace_multi_regex_fn<RX_STACK_SMALL>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
stream,
mr);
else if (regex_insts <= RX_MEDIUM_INSTS)
return make_strings_children(
replace_multi_regex_fn<RX_STACK_MEDIUM>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
stream,
mr);
else
return make_strings_children(
replace_multi_regex_fn<RX_STACK_LARGE>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
stream,
mr);
}();
return make_strings_column(strings_count,
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace_re(strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_re(strings, patterns, repls, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| aa80fed0aab33bdae741d03bc7601d9f9be8b871.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;
/**
* @brief This functor handles replacing strings by applying the compiled regex patterns
* and inserting the corresponding new string within the matched range of characters.
*
* The logic includes computing the size of each string and also writing the output.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex
* pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory. Shorter patterns are common in data cleaning.
*/
template <size_t stack_size>
struct replace_multi_regex_fn {
column_device_view const d_strings;
reprog_device* progs; // array of regex progs
size_type number_of_patterns;
found_range* d_found_ranges; // working array matched (begin,end) values
column_device_view const d_repls; // replacment strings
int32_t* d_offsets{}; // these are null when
char* d_chars{}; // only computing size
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
u_char data1[stack_size];
u_char data2[stack_size];
auto const d_str = d_strings.element<string_view>(idx);
auto const nchars = d_str.length(); // number of characters in input string
auto nbytes = d_str.size_bytes(); // number of bytes in input string
auto in_ptr = d_str.data(); // input pointer
auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
size_type lpos = 0;
size_type ch_pos = 0;
// initialize the working ranges memory to -1's
thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
// process string one character at a time
while (ch_pos < nchars) {
// this minimizes the regex-find calls by only calling it for stale patterns
// -- those that have not previously matched up to this point (ch_pos)
for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
if (d_ranges[ptn_idx].first >= ch_pos) // previously matched here
continue; // or later in the string
reprog_device prog = progs[ptn_idx];
prog.set_stack_mem(data1, data2);
auto begin = static_cast<int32_t>(ch_pos);
auto end = static_cast<int32_t>(nchars);
if (!prog.is_empty() && prog.find(idx, d_str, begin, end) > 0)
d_ranges[ptn_idx] = found_range{begin, end}; // found a match
else
d_ranges[ptn_idx] = found_range{nchars, nchars}; // this pattern is done
}
// all the ranges have been updated from each regex match;
// look for any that match at this character position (ch_pos)
auto itr =
thrust::find_if(thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos](auto range) {
return range.first == ch_pos;
});
if (itr != d_ranges + number_of_patterns) {
// match found, compute and replace the string in the output
size_type ptn_idx = static_cast<size_type>(itr - d_ranges);
size_type begin = d_ranges[ptn_idx].first;
size_type end = d_ranges[ptn_idx].second;
string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
: d_repls.element<string_view>(0);
auto spos = d_str.byte_offset(begin);
auto epos = d_str.byte_offset(end);
nbytes += d_repl.size_bytes() - (epos - spos);
if (out_ptr) { // copy unmodified content plus new replacement string
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = epos;
}
ch_pos = end - 1;
}
++ch_pos;
}
if (out_ptr) // copy the remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = static_cast<int32_t>(nbytes);
}
};
} // namespace
std::unique_ptr<column> replace_re(
strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
if (patterns.empty()) // no patterns; just return a copy
return std::make_unique<column>(strings.parent(), stream, mr);
CUDF_EXPECTS(!repls.has_nulls(), "Parameter repls must not have any nulls");
auto d_strings = column_device_view::create(strings.parent(), stream);
auto d_repls = column_device_view::create(repls.parent(), stream);
auto d_flags = get_character_flags_table();
// compile regexes into device objects
size_type regex_insts = 0;
std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
thrust::host_vector<reprog_device> progs;
for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
auto prog = reprog_device::create(*itr, d_flags, strings_count, stream);
regex_insts = std::max(regex_insts, prog->insts_counts());
progs.push_back(*prog);
h_progs.emplace_back(std::move(prog));
}
// copy all the reprog_device instances to a device memory array
rmm::device_buffer progs_buffer{sizeof(reprog_device) * progs.size()};
CUDA_TRY(cudaMemcpyAsync(progs_buffer.data(),
progs.data(),
progs.size() * sizeof(reprog_device),
cudaMemcpyHostToDevice,
stream.value()));
reprog_device* d_progs = reinterpret_cast<reprog_device*>(progs_buffer.data());
// create working buffer for ranges pairs
rmm::device_uvector<found_range> found_ranges(patterns.size() * strings_count, stream);
auto d_found_ranges = found_ranges.data();
// create child columns
// std::pair<std::unique_ptr<column>, std::unique_ptr<column>> children(nullptr, nullptr);
auto children = [&] {
// Each invocation is predicated on the stack size which is dependent on the number of regex
// instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS))
return make_strings_children(
replace_multi_regex_fn<RX_STACK_SMALL>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
stream,
mr);
else if (regex_insts <= RX_MEDIUM_INSTS)
return make_strings_children(
replace_multi_regex_fn<RX_STACK_MEDIUM>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
stream,
mr);
else
return make_strings_children(
replace_multi_regex_fn<RX_STACK_LARGE>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
stream,
mr);
}();
return make_strings_column(strings_count,
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace_re(strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_re(strings, patterns, repls, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
3fb6b50dc3e9b15f9b7377f53644deee3c5e3220.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <cudf/cudf.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, gdf_valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows);
__global__ void determineValidRecCount(gdf_valid_type *validArray, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset);
// Fixed: this declaration previously took (gdf_column*, int, gdf_dtype), which
// matched neither the definition below nor the void* argument passed from
// cudaCreateCSR; it now mirrors the actual definition.
template<typename T>
__device__ T convertDataElement(void *data, int idx, gdf_dtype dtype);
// Index of the bitmap byte holding the validity bit for `record`.
__device__ int whichBitmapCSR(int record) { return (record / 8); }

// Position of the validity bit for `bit` within its bitmap byte.
__device__ int whichBitCSR(int bit) { return (bit % 8); }

// Nonzero iff bit `bit` is set in the bitmap byte `data`.
__device__ int checkBitCSR(gdf_valid_type data, int bit) {
  // Same mask as the 1,2,4,...,128 table, computed directly.
  return (data & (gdf_valid_type)(1 << bit));
}
//
//------------------------------------------------------------
//
/*
 * Convert a Dense GDF into a CSR GDF
 *
 * Restrictions: All columns need to be of the same length
 */
/**
 * @brief Convert a dense, column-major GDF matrix into CSR form.
 *
 * The element type of the CSR value array (A) is the widest dtype found
 * across the input columns. A COO dataset passed in is simply treated as a
 * two-column matrix.
 *
 * @param[in]  gdfData   ordered list of input columns
 * @param[in]  numCol    number of columns in the gdfData array
 * @param[out] csrReturn receives dtype/rows/cols/nnz/IA/JA; A is filled by
 *                       runConverter
 * @return gdf_error code
 */
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
  int64_t numNull = 0;
  int64_t nnz = 0;
  gdf_size_type numRows = gdfData[0]->size;
  gdf_dtype dType = gdf_dtype::GDF_invalid;  // element type for the CSR A array

  // The gdf_dtype enum is ordered by element size, so the widest column type
  // is simply the maximum enum value. Also accumulate the null count
  // (depends on algorithms actually setting null_count).
  for (int x = 0; x < numCol; x++) {
    if (gdfData[x]->dtype > dType) dType = gdfData[x]->dtype;
    numNull += gdfData[x]->null_count;
  }
  if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING)
    return gdf_error::GDF_UNSUPPORTED_DTYPE;

  // rows * cols can exceed 32 bits, so widen before multiplying (previously
  // the multiply happened in 32-bit and could overflow before the widening).
  nnz = (int64_t)numRows * (int64_t)numCol - numNull;

  // Scratch buffer of per-row valid counts, later scanned into row offsets.
  // All device-side accesses treat this as gdf_size_type, so size it that way.
  // NOTE: totals beyond the range of gdf_size_type are not representable here.
  gdf_size_type *offsets;
  RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(gdf_size_type), 0));  // TODO: non-default stream?
  CUDA_TRY(hipMemset(offsets, 0, (sizeof(gdf_size_type) * (numRows + 2))));

  //-- threads and blocks
  int threads = 1024;
  int blocks = (numRows + threads - 1) / threads;

  // One pass per column; each pass adds that column's per-row valid count.
  for (int x = 0; x < numCol; x++) {
    hipLaunchKernelGGL(( determineValidRecCount), dim3(blocks), dim3(threads), 0, 0, gdfData[x]->valid, numRows, numCol, offsets);
  }

  // Exclusive scan turns the counts into write offsets; offsets[numRows]
  // then holds the total number of non-null entries.
  thrust::exclusive_scan(rmm::exec_policy()->on(0), offsets, (offsets + numRows + 1), offsets);

  // Read the total back with the element's own width. (A previous version
  // copied sizeof(int64_t) out of this gdf_size_type array, which only worked
  // on little-endian because the trailing element happened to be zero.)
  gdf_size_type nnzTotal = 0;
  CUDA_TRY(hipMemcpy((void *)&nnzTotal, (void *)&offsets[numRows], sizeof(gdf_size_type), hipMemcpyDeviceToHost));
  nnz = nnzTotal;
  if (nnz == 0) {
    RMM_TRY(RMM_FREE(offsets, 0));  // previously leaked on this early return
    return GDF_CUDA_ERROR;
  }

  // Row-offset array (IA), copied from the scan result.
  gdf_size_type* IA;
  RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(gdf_size_type), 0));
  CUDA_TRY(hipMemcpy(IA, offsets, (sizeof(gdf_size_type) * (numRows + 2)), hipMemcpyDeviceToDevice));

  // Column-index array (JA): one entry per non-null element.
  int64_t * JA;
  RMM_TRY(RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));

  // Fill in everything except A, which runConverter produces.
  csrReturn->dtype = dType;
  csrReturn->rows = numRows;
  csrReturn->cols = numCol;
  csrReturn->JA = JA;
  csrReturn->IA = IA;
  csrReturn->nnz = nnz;

  // Dispatch on the element type to fill A and JA.
  gdf_error status = GDF_SUCCESS;
  switch (dType) {
    case gdf_dtype::GDF_INT8:
      status = runConverter<int8_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_INT16:
      status = runConverter<int16_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_INT32:
      status = runConverter<int32_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_INT64:
      status = runConverter<int64_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_FLOAT32:
      status = runConverter<float>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_FLOAT64:
      status = runConverter<double>(gdfData, csrReturn, offsets);
      break;
    default:
      RMM_TRY(RMM_FREE(IA, 0));
      RMM_TRY(RMM_FREE(JA, 0));
      RMM_TRY(RMM_FREE(offsets, 0));
      return GDF_UNSUPPORTED_DTYPE;
  }
  RMM_TRY(RMM_FREE(offsets, 0));
  return status;
}
/**
 * @brief Fill the CSR value array (A) and column-index array (JA).
 *
 * Allocates A sized to csrReturn->nnz, then launches cudaCreateCSR once per
 * column. The launches must stay sequential (same stream) because each one
 * advances the shared per-row write offsets.
 *
 * @param gdfData   input columns
 * @param csrReturn CSR being built; A is stored into it on success
 * @param offsets   per-row write offsets from the exclusive scan (mutated)
 */
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets) {
  gdf_size_type numCols = csrReturn->cols;
  gdf_size_type numRows = csrReturn->rows;
  //-- threads and blocks: shrink the block size for small row counts
  int threads = 1024;
  if ( numRows < 100 ) {
    threads = 64;
  } else if (numRows < 256) {
    threads = 128;
  } else if ( numRows < 512) {
    threads = 256;
  } else if ( numRows < 1024) {
    threads = 512;
  }
  int blocks = (numRows + threads - 1) / threads;  // ceil-div, one thread per row
  T * A;
  RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
  CUDA_TRY(hipMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
  // Now start moving the data and creating the CSR
  for ( gdf_size_type colId = 0; colId < numCols; colId++ ) {
    gdf_column *gdf = gdfData[colId];
    hipLaunchKernelGGL(( cudaCreateCSR<T>), dim3(blocks), dim3(threads), 0, 0, gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
    CUDA_CHECK_LAST();
  }
  csrReturn->A = A;
  return gdf_error::GDF_SUCCESS;
}
/*
 * Move data over into CSR and possible convert format
 */
/**
 * @brief One thread per row: if this row's element in the current column is
 * valid, convert it to T, write it (and its column index) at the row's next
 * write position, and advance that position.
 *
 * Because offsets[] is mutated, launches for different columns must not run
 * concurrently; runConverter launches them sequentially on one stream.
 * NOTE(review): `valid` is dereferenced unconditionally — assumes every
 * column carries a validity bitmap; confirm callers guarantee this.
 */
template<typename T>
__global__ void cudaCreateCSR(
    void *data, gdf_valid_type *valid, gdf_dtype dtype, int colId,
    T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows)
{
  int tid = threadIdx.x + (blockDim.x * blockIdx.x);  // get the tread ID which is also the row number
  if ( tid >= numRows)
    return;
  int bitmapIdx = whichBitmapCSR(tid);  // which bitmap
  int bitIdx = whichBitCSR(tid);        // which bit - over an 8-bit index
  gdf_valid_type bitmap = valid[bitmapIdx];
  if ( checkBitCSR( bitmap, bitIdx) ) {
    gdf_size_type offsetIdx = offsets[tid];  // where should this thread start writing data
    A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
    JA[offsetIdx] = colId;
    ++offsets[tid];  // bump the row's write position for the next column
  }
}
/*
 * Compute the number of valid entries per rows - a row spans multiple gdf_colums -
 * There is one thread running per row, so just compute the sum for this row.
 *
 * the number of elements a valid array is actually ceil(numRows / 8) since it is a bitmap. the total number of bits checked is equal to numRows
 *
 */
/**
 * @brief One thread per row: increment offset[row] if this column's bit for
 * the row is set. Launched once per column, so offset accumulates the per-row
 * valid count across all columns.
 *
 * NOTE(review): `numCol` is unused here; kept only for signature stability.
 * NOTE(review): `valid` is dereferenced unconditionally — columns without a
 * null mask (valid == NULL) would crash; confirm callers guarantee a mask.
 */
__global__ void determineValidRecCount(gdf_valid_type *valid, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset) {
  int tid = threadIdx.x + (blockDim.x * blockIdx.x);  // get the tread ID which is also the row number
  if ( tid >= numRows)
    return;
  int bitmapIdx = whichBitmapCSR(tid);  // want the floor of the divide
  int bitIdx = whichBitCSR(tid);        // which bit - over an 8-bit index
  gdf_valid_type bitmap = valid[bitmapIdx];
  if (checkBitCSR( bitmap, bitIdx) )
    ++offset[tid];
}
/**
 * @brief Read element @p tid from a column buffer whose runtime type is
 * @p dtype and convert it to the CSR value type T.
 *
 * @return the converted element, or T(0) for an unsupported dtype
 *         (previously the result was returned uninitialized in that case;
 *         gdf_to_csr filters unsupported dtypes, so this is defensive).
 */
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
  T answer = (T)0;  // never return an uninitialized value
  switch(dtype) {
    case gdf_dtype::GDF_INT8: {
      int8_t *a = (int8_t *)data;
      answer = (T)(a[tid]);
      break;
    }
    case gdf_dtype::GDF_INT16: {
      int16_t *b = (int16_t *)data;
      answer = (T)(b[tid]);
      break;
    }
    case gdf_dtype::GDF_INT32: {
      int32_t *c = (int32_t *)data;
      answer = (T)(c[tid]);
      break;
    }
    case gdf_dtype::GDF_INT64: {
      int64_t *d = (int64_t *)data;
      answer = (T)(d[tid]);
      break;
    }
    case gdf_dtype::GDF_FLOAT32: {
      float *e = (float *)data;
      answer = (T)(e[tid]);
      break;
    }
    case gdf_dtype::GDF_FLOAT64: {
      double *f = (double *)data;
      answer = (T)(f[tid]);
      break;
    }
    default:
      break;  // unsupported dtypes are filtered out by gdf_to_csr
  }
  return answer;
}
| 3fb6b50dc3e9b15f9b7377f53644deee3c5e3220.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <cudf/cudf.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, gdf_valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows);
__global__ void determineValidRecCount(gdf_valid_type *validArray, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset);
// Fixed: this declaration previously took (gdf_column*, int, gdf_dtype), which
// matched neither the definition below nor the void* argument passed from
// cudaCreateCSR; it now mirrors the actual definition.
template<typename T>
__device__ T convertDataElement(void *data, int idx, gdf_dtype dtype);
// Index of the bitmap byte holding the validity bit for `record`.
__device__ int whichBitmapCSR(int record) { return (record/8); }
// Position of the validity bit for `bit` within its bitmap byte.
__device__ int whichBitCSR(int bit) { return (bit % 8); }
// Nonzero iff bit `bit` is set in the bitmap byte `data`.
// NOTE(review): the lookup table is rebuilt on every call; (1 << bit) would do.
__device__ int checkBitCSR(gdf_valid_type data, int bit) {
  gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
  return (data & bitMask[bit]);
}
//
//------------------------------------------------------------
//
/*
 * Convert a Dense GDF into a CSR GDF
 *
 * Restrictions: All columns need to be of the same length
 */
/**
 * @brief Convert a dense, column-major GDF matrix into CSR form.
 *
 * The element type of the CSR value array (A) is the widest dtype found
 * across the input columns. A COO dataset passed in is simply treated as a
 * two-column matrix.
 *
 * @param[in]  gdfData   ordered list of input columns
 * @param[in]  numCol    number of columns in the gdfData array
 * @param[out] csrReturn receives dtype/rows/cols/nnz/IA/JA; A is filled by
 *                       runConverter
 * @return gdf_error code
 */
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
  int64_t numNull = 0;
  int64_t nnz = 0;
  gdf_size_type numRows = gdfData[0]->size;
  gdf_dtype dType = gdf_dtype::GDF_invalid;  // element type for the CSR A array

  // The gdf_dtype enum is ordered by element size, so the widest column type
  // is simply the maximum enum value. Also accumulate the null count
  // (depends on algorithms actually setting null_count).
  for (int x = 0; x < numCol; x++) {
    if (gdfData[x]->dtype > dType) dType = gdfData[x]->dtype;
    numNull += gdfData[x]->null_count;
  }
  if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING)
    return gdf_error::GDF_UNSUPPORTED_DTYPE;

  // rows * cols can exceed 32 bits, so widen before multiplying (previously
  // the multiply happened in 32-bit and could overflow before the widening).
  nnz = (int64_t)numRows * (int64_t)numCol - numNull;

  // Scratch buffer of per-row valid counts, later scanned into row offsets.
  // All device-side accesses treat this as gdf_size_type, so size it that way.
  // NOTE: totals beyond the range of gdf_size_type are not representable here.
  gdf_size_type *offsets;
  RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(gdf_size_type), 0));  // TODO: non-default stream?
  CUDA_TRY(cudaMemset(offsets, 0, (sizeof(gdf_size_type) * (numRows + 2))));

  //-- threads and blocks
  int threads = 1024;
  int blocks = (numRows + threads - 1) / threads;

  // One pass per column; each pass adds that column's per-row valid count.
  for (int x = 0; x < numCol; x++) {
    determineValidRecCount<<<blocks, threads>>>(gdfData[x]->valid, numRows, numCol, offsets);
  }

  // Exclusive scan turns the counts into write offsets; offsets[numRows]
  // then holds the total number of non-null entries.
  thrust::exclusive_scan(rmm::exec_policy()->on(0), offsets, (offsets + numRows + 1), offsets);

  // Read the total back with the element's own width. (A previous version
  // copied sizeof(int64_t) out of this gdf_size_type array, which only worked
  // on little-endian because the trailing element happened to be zero.)
  gdf_size_type nnzTotal = 0;
  CUDA_TRY(cudaMemcpy((void *)&nnzTotal, (void *)&offsets[numRows], sizeof(gdf_size_type), cudaMemcpyDeviceToHost));
  nnz = nnzTotal;
  if (nnz == 0) {
    RMM_TRY(RMM_FREE(offsets, 0));  // previously leaked on this early return
    return GDF_CUDA_ERROR;
  }

  // Row-offset array (IA), copied from the scan result.
  gdf_size_type* IA;
  RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(gdf_size_type), 0));
  CUDA_TRY(cudaMemcpy(IA, offsets, (sizeof(gdf_size_type) * (numRows + 2)), cudaMemcpyDeviceToDevice));

  // Column-index array (JA): one entry per non-null element.
  int64_t * JA;
  RMM_TRY( RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));

  // Fill in everything except A, which runConverter produces.
  csrReturn->dtype = dType;
  csrReturn->rows = numRows;
  csrReturn->cols = numCol;
  csrReturn->JA = JA;
  csrReturn->IA = IA;
  csrReturn->nnz = nnz;

  // Dispatch on the element type to fill A and JA.
  gdf_error status = GDF_SUCCESS;
  switch (dType) {
    case gdf_dtype::GDF_INT8:
      status = runConverter<int8_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_INT16:
      status = runConverter<int16_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_INT32:
      status = runConverter<int32_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_INT64:
      status = runConverter<int64_t>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_FLOAT32:
      status = runConverter<float>(gdfData, csrReturn, offsets);
      break;
    case gdf_dtype::GDF_FLOAT64:
      status = runConverter<double>(gdfData, csrReturn, offsets);
      break;
    default:
      RMM_TRY(RMM_FREE(IA, 0));
      RMM_TRY(RMM_FREE(JA, 0));
      RMM_TRY(RMM_FREE(offsets, 0));
      return GDF_UNSUPPORTED_DTYPE;
  }
  RMM_TRY(RMM_FREE(offsets, 0));
  return status;
}
/**
 * @brief Fill the CSR value array (A) and column-index array (JA).
 *
 * Allocates A sized to csrReturn->nnz, then launches cudaCreateCSR once per
 * column. The launches must stay sequential (same stream) because each one
 * advances the shared per-row write offsets.
 *
 * @param gdfData   input columns
 * @param csrReturn CSR being built; A is stored into it on success
 * @param offsets   per-row write offsets from the exclusive scan (mutated)
 */
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets) {
  gdf_size_type numCols = csrReturn->cols;
  gdf_size_type numRows = csrReturn->rows;
  //-- threads and blocks: shrink the block size for small row counts
  int threads = 1024;
  if ( numRows < 100 ) {
    threads = 64;
  } else if (numRows < 256) {
    threads = 128;
  } else if ( numRows < 512) {
    threads = 256;
  } else if ( numRows < 1024) {
    threads = 512;
  }
  int blocks = (numRows + threads - 1) / threads;  // ceil-div, one thread per row
  T * A;
  RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
  CUDA_TRY(cudaMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
  // Now start moving the data and creating the CSR
  for ( gdf_size_type colId = 0; colId < numCols; colId++ ) {
    gdf_column *gdf = gdfData[colId];
    cudaCreateCSR<T><<<blocks, threads>>>(gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
    CUDA_CHECK_LAST();
  }
  csrReturn->A = A;
  return gdf_error::GDF_SUCCESS;
}
/*
 * Move data over into CSR and possible convert format
 */
/**
 * @brief One thread per row: if this row's element in the current column is
 * valid, convert it to T, write it (and its column index) at the row's next
 * write position, and advance that position.
 *
 * Because offsets[] is mutated, launches for different columns must not run
 * concurrently; runConverter launches them sequentially on one stream.
 * NOTE(review): `valid` is dereferenced unconditionally — assumes every
 * column carries a validity bitmap; confirm callers guarantee this.
 */
template<typename T>
__global__ void cudaCreateCSR(
    void *data, gdf_valid_type *valid, gdf_dtype dtype, int colId,
    T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows)
{
  int tid = threadIdx.x + (blockDim.x * blockIdx.x);  // get the tread ID which is also the row number
  if ( tid >= numRows)
    return;
  int bitmapIdx = whichBitmapCSR(tid);  // which bitmap
  int bitIdx = whichBitCSR(tid);        // which bit - over an 8-bit index
  gdf_valid_type bitmap = valid[bitmapIdx];
  if ( checkBitCSR( bitmap, bitIdx) ) {
    gdf_size_type offsetIdx = offsets[tid];  // where should this thread start writing data
    A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
    JA[offsetIdx] = colId;
    ++offsets[tid];  // bump the row's write position for the next column
  }
}
/*
 * Compute the number of valid entries per rows - a row spans multiple gdf_colums -
 * There is one thread running per row, so just compute the sum for this row.
 *
 * the number of elements a valid array is actually ceil(numRows / 8) since it is a bitmap. the total number of bits checked is equal to numRows
 *
 */
/**
 * @brief One thread per row: increment offset[row] if this column's bit for
 * the row is set. Launched once per column, so offset accumulates the per-row
 * valid count across all columns.
 *
 * NOTE(review): `numCol` is unused here; kept only for signature stability.
 * NOTE(review): `valid` is dereferenced unconditionally — columns without a
 * null mask (valid == NULL) would crash; confirm callers guarantee a mask.
 */
__global__ void determineValidRecCount(gdf_valid_type *valid, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset) {
  int tid = threadIdx.x + (blockDim.x * blockIdx.x);  // get the tread ID which is also the row number
  if ( tid >= numRows)
    return;
  int bitmapIdx = whichBitmapCSR(tid);  // want the floor of the divide
  int bitIdx = whichBitCSR(tid);        // which bit - over an 8-bit index
  gdf_valid_type bitmap = valid[bitmapIdx];
  if (checkBitCSR( bitmap, bitIdx) )
    ++offset[tid];
}
/**
 * @brief Read element @p tid from a column buffer whose runtime type is
 * @p dtype and convert it to the CSR value type T.
 *
 * @return the converted element, or T(0) for an unsupported dtype
 *         (previously the result was returned uninitialized in that case;
 *         gdf_to_csr filters unsupported dtypes, so this is defensive).
 */
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
  T answer = (T)0;  // never return an uninitialized value
  switch(dtype) {
    case gdf_dtype::GDF_INT8: {
      int8_t *a = (int8_t *)data;
      answer = (T)(a[tid]);
      break;
    }
    case gdf_dtype::GDF_INT16: {
      int16_t *b = (int16_t *)data;
      answer = (T)(b[tid]);
      break;
    }
    case gdf_dtype::GDF_INT32: {
      int32_t *c = (int32_t *)data;
      answer = (T)(c[tid]);
      break;
    }
    case gdf_dtype::GDF_INT64: {
      int64_t *d = (int64_t *)data;
      answer = (T)(d[tid]);
      break;
    }
    case gdf_dtype::GDF_FLOAT32: {
      float *e = (float *)data;
      answer = (T)(e[tid]);
      break;
    }
    case gdf_dtype::GDF_FLOAT64: {
      double *f = (double *)data;
      answer = (T)(f[tid]);
      break;
    }
    default:
      break;  // unsupported dtypes are filtered out by gdf_to_csr
  }
  return answer;
}
|
5c61b4270dc499b05cfdc82fee2bc2ee10266986.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
// Prints one int per line; used with thrust::for_each as a debugging sink.
struct printf_functor_i
{
    __host__ __device__
    void operator()(int value)
    {
        printf("%d\n", value);
    }
};
// Prints the four components of a float4 on one line; thrust::for_each sink.
struct printf_functor_f4
{
    __host__ __device__
    void operator()(float4 quad)
    {
        printf("%10.4f %10.4f %10.4f %10.4f \n", quad.x, quad.y, quad.z, quad.w);
    }
};
// Demo: iterate device_vectors element-wise with thrust::for_each, printing
// from device code; then sync so the printf buffers reach the terminal.
int main()
{
    // ivec holds 0, 1, 2 — same values as before, filled via a loop.
    thrust::device_vector<int> ivec(3);
    for (int i = 0; i < 3; ++i)
        ivec[i] = i;
    thrust::for_each(ivec.begin(), ivec.end(), printf_functor_i());

    // fvec holds three identical (1,2,3,4) quads.
    thrust::device_vector<float4> fvec(3);
    for (int i = 0; i < 3; ++i)
        fvec[i] = make_float4( 1.f, 2.f, 3.f, 4.f );
    thrust::for_each(fvec.begin(), fvec.end(), printf_functor_f4());

    hipDeviceSynchronize();
    // Without the sync the process will typically terminate before
    // any output stream gets pumped out to the terminal when
    // iterating over device_ptr.
    // Curiously that doesnt seem to happen with device_vector ?
    // Maybe their dtors are delayed by the dumping
}
| 5c61b4270dc499b05cfdc82fee2bc2ee10266986.cu | /*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
// Prints one int per line; used with thrust::for_each as a debugging sink.
struct printf_functor_i
{
    __host__ __device__
    void operator()(int value)
    {
        printf("%d\n", value);
    }
};
// Prints the four components of a float4 on one line; thrust::for_each sink.
struct printf_functor_f4
{
    __host__ __device__
    void operator()(float4 quad)
    {
        printf("%10.4f %10.4f %10.4f %10.4f \n", quad.x, quad.y, quad.z, quad.w);
    }
};
// Demo: iterate device_vectors element-wise with thrust::for_each, printing
// from device code; then sync so the printf buffers reach the terminal.
int main()
{
    // ivec holds 0, 1, 2 — same values as before, filled via a loop.
    thrust::device_vector<int> ivec(3);
    for (int i = 0; i < 3; ++i)
        ivec[i] = i;
    thrust::for_each(ivec.begin(), ivec.end(), printf_functor_i());

    // fvec holds three identical (1,2,3,4) quads.
    thrust::device_vector<float4> fvec(3);
    for (int i = 0; i < 3; ++i)
        fvec[i] = make_float4( 1.f, 2.f, 3.f, 4.f );
    thrust::for_each(fvec.begin(), fvec.end(), printf_functor_f4());

    cudaDeviceSynchronize();
    // Without the sync the process will typically terminate before
    // any output stream gets pumped out to the terminal when
    // iterating over device_ptr.
    // Curiously that doesnt seem to happen with device_vector ?
    // Maybe their dtors are delayed by the dumping
}
|
f96ac3a6e3535f40bf0908da949c18f2c78a3af2.hip | // !!! This is a file automatically generated by hipify!!!
/*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/
#include <stdint.h>
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/data_layers.hpp"
namespace caffe {
/**
 * @brief GPU forward pass: copy the next batch_size rows of the loaded HDF5
 * blobs into the top blobs, advancing through rows (and files) as needed.
 *
 * Only the CUDA backend has a real GPU path (USE_ROCM after hipification);
 * the GreenTea/OpenCL backend falls back to Forward_cpu.
 */
template<typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    const int_tp batch_size = this->layer_param_.hdf5_data_param().batch_size();
    for (int_tp i = 0; i < batch_size; ++i, ++current_row_) {
      // Exhausted the current file's rows: advance to the next file,
      // wrapping around and optionally reshuffling the file order.
      if (current_row_ == hdf_blobs_[0]->shape(0)) {
        if (num_files_ > 1) {
          current_file_ += 1;
          if (current_file_ == num_files_) {
            current_file_ = 0;
            if (this->layer_param_.hdf5_data_param().shuffle()) {
              std::random_shuffle(file_permutation_.begin(),
                                  file_permutation_.end());
            }
            DLOG(INFO)<< "Looping around to first file.";
          }
          LoadHDF5FileData(
              hdf_filenames_[file_permutation_[current_file_]].c_str());
        }
        current_row_ = 0;
        // Optionally reshuffle the row order within the (new) file.
        if (this->layer_param_.hdf5_data_param().shuffle())
          std::random_shuffle(data_permutation_.begin(),
                              data_permutation_.end());
      }
      // Copy one (possibly permuted) row of every top blob, host -> device.
      for (int_tp j = 0; j < this->layer_param_.top_size(); ++j) {
        int_tp data_dim = top[j]->count() / top[j]->shape(0);
        caffe_copy(
            data_dim,
            &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_]
                * data_dim],
            &top[j]->mutable_gpu_data()[i * data_dim]);
      }
    }
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    Forward_cpu(bottom, top);
#endif  // USE_GREENTEA
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer);
} // namespace caffe
| f96ac3a6e3535f40bf0908da949c18f2c78a3af2.cu | /*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/
#include <stdint.h>
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/data_layers.hpp"
namespace caffe {
/**
 * @brief GPU forward pass: copy the next batch_size rows of the loaded HDF5
 * blobs into the top blobs, advancing through rows (and files) as needed.
 *
 * Only the CUDA backend has a real GPU path (guarded by USE_CUDA);
 * the GreenTea/OpenCL backend falls back to Forward_cpu.
 */
template<typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    const int_tp batch_size = this->layer_param_.hdf5_data_param().batch_size();
    for (int_tp i = 0; i < batch_size; ++i, ++current_row_) {
      // Exhausted the current file's rows: advance to the next file,
      // wrapping around and optionally reshuffling the file order.
      if (current_row_ == hdf_blobs_[0]->shape(0)) {
        if (num_files_ > 1) {
          current_file_ += 1;
          if (current_file_ == num_files_) {
            current_file_ = 0;
            if (this->layer_param_.hdf5_data_param().shuffle()) {
              std::random_shuffle(file_permutation_.begin(),
                                  file_permutation_.end());
            }
            DLOG(INFO)<< "Looping around to first file.";
          }
          LoadHDF5FileData(
              hdf_filenames_[file_permutation_[current_file_]].c_str());
        }
        current_row_ = 0;
        // Optionally reshuffle the row order within the (new) file.
        if (this->layer_param_.hdf5_data_param().shuffle())
          std::random_shuffle(data_permutation_.begin(),
                              data_permutation_.end());
      }
      // Copy one (possibly permuted) row of every top blob, host -> device.
      for (int_tp j = 0; j < this->layer_param_.top_size(); ++j) {
        int_tp data_dim = top[j]->count() / top[j]->shape(0);
        caffe_copy(
            data_dim,
            &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_]
                * data_dim],
            &top[j]->mutable_gpu_data()[i * data_dim]);
      }
    }
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    Forward_cpu(bottom, top);
#endif  // USE_GREENTEA
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer);
} // namespace caffe
|
4ec91f190c845edd2c2036cd5e490d4e18b17b44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// 2D float texture
texture<float, hipTextureType2D, hipReadModeElementType> texRef;
// Simple transformation kernel: rotates the bound texture by `theta` radians
// about the image centre and writes the sampled result to `output`
// (row-major, width x height). Assumes texRef is bound with normalized
// coordinates, wrap addressing, and linear filtering (set by the host code).
__global__ void transformKernel(float* output,
                                int width, int height,
                                float theta)
{
    // Calculate normalized texture coordinates
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: the launch uses a ceil-div grid, so tail threads past the image
    // edge previously wrote out of bounds; guard them here.
    if (x >= (unsigned int)width || y >= (unsigned int)height)
        return;
    float u = x / (float)width;
    float v = y / (float)height;
    // Transform coordinates (rotation about the centre of the [0,1)^2 square)
    u -= 0.5f;
    v -= 0.5f;
    float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
    float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
    // Read from texture and write to global memory
    output[y * width + x] = tex2D(texRef, tu, tv);
}
// Host code
/*
 * NOTE(review): this is an SDK-style snippet — `width`, `height`, `h_data`,
 * `size`, and `angle` are never declared, so it will not compile standalone.
 * The texture *reference* API used here (texRef / hipBindTextureToArray /
 * hipMemcpyToArray) is deprecated in favour of texture objects — confirm the
 * target toolkit still provides it.
 */
int main()
{
    // Allocate CUDA array in device memory (single 32-bit float channel)
    hipChannelFormatDesc channelDesc =
        hipCreateChannelDesc(32, 0, 0, 0,
                             hipChannelFormatKindFloat);
    hipArray* cuArray;
    hipMallocArray(&cuArray, &channelDesc, width, height);
    // Copy to device memory some data located at address h_data
    // in host memory
    hipMemcpyToArray(cuArray, 0, 0, h_data, size,
                     hipMemcpyHostToDevice);
    // Set texture reference parameters: wrap addressing + bilinear filtering
    // over normalized [0,1) coordinates (matches transformKernel's sampling).
    texRef.addressMode[0] = hipAddressModeWrap;
    texRef.addressMode[1] = hipAddressModeWrap;
    texRef.filterMode = hipFilterModeLinear;
    texRef.normalized = true;
    // Bind the array to the texture reference
    hipBindTextureToArray(texRef, cuArray, channelDesc);
    // Allocate result of transformation in device memory
    float* output;
    hipMalloc(&output, width * height * sizeof(float));
    // Invoke kernel with a ceil-div grid so the whole image is covered
    dim3 dimBlock(16, 16);
    dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
                 (height + dimBlock.y - 1) / dimBlock.y);
    hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, width, height,
        angle);
    // Free device memory
    hipFreeArray(cuArray);
    hipFree(output);
    return 0;
}
| 4ec91f190c845edd2c2036cd5e490d4e18b17b44.cu | // 2D float texture
texture<float, cudaTextureType2D, cudaReadModeElementType> texRef;
// Simple transformation kernel: rotates the bound texture by `theta` radians
// about the image centre and writes the sampled result to `output`
// (row-major, width x height). Assumes texRef is bound with normalized
// coordinates, wrap addressing, and linear filtering (set by the host code).
__global__ void transformKernel(float* output,
                                int width, int height,
                                float theta)
{
    // Calculate normalized texture coordinates
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: the launch uses a ceil-div grid, so tail threads past the image
    // edge previously wrote out of bounds; guard them here.
    if (x >= (unsigned int)width || y >= (unsigned int)height)
        return;
    float u = x / (float)width;
    float v = y / (float)height;
    // Transform coordinates (rotation about the centre of the [0,1)^2 square)
    u -= 0.5f;
    v -= 0.5f;
    float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
    float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
    // Read from texture and write to global memory
    output[y * width + x] = tex2D(texRef, tu, tv);
}
// Host code
/*
 * NOTE(review): this is an SDK-style snippet — `width`, `height`, `h_data`,
 * `size`, and `angle` are never declared, so it will not compile standalone.
 * The texture *reference* API used here (texRef / cudaBindTextureToArray /
 * cudaMemcpyToArray) is deprecated and removed in CUDA 12; texture objects
 * are the replacement — confirm the target toolkit still provides it.
 */
int main()
{
    // Allocate CUDA array in device memory (single 32-bit float channel)
    cudaChannelFormatDesc channelDesc =
        cudaCreateChannelDesc(32, 0, 0, 0,
                              cudaChannelFormatKindFloat);
    cudaArray* cuArray;
    cudaMallocArray(&cuArray, &channelDesc, width, height);
    // Copy to device memory some data located at address h_data
    // in host memory
    cudaMemcpyToArray(cuArray, 0, 0, h_data, size,
                      cudaMemcpyHostToDevice);
    // Set texture reference parameters: wrap addressing + bilinear filtering
    // over normalized [0,1) coordinates (matches transformKernel's sampling).
    texRef.addressMode[0] = cudaAddressModeWrap;
    texRef.addressMode[1] = cudaAddressModeWrap;
    texRef.filterMode = cudaFilterModeLinear;
    texRef.normalized = true;
    // Bind the array to the texture reference
    cudaBindTextureToArray(texRef, cuArray, channelDesc);
    // Allocate result of transformation in device memory
    float* output;
    cudaMalloc(&output, width * height * sizeof(float));
    // Invoke kernel with a ceil-div grid so the whole image is covered
    dim3 dimBlock(16, 16);
    dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
                 (height + dimBlock.y - 1) / dimBlock.y);
    transformKernel<<<dimGrid, dimBlock>>>(output, width, height,
                                           angle);
    // Free device memory
    cudaFreeArray(cuArray);
    cudaFree(output);
    return 0;
}
|
cd520e06c868e6b72c8f7a5b4d368356694248de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/crop_layer.hpp"
namespace caffe {
// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
// `n` is the total number of lines, `height` the number of lines per outer
// slab, `width` the contiguous elements per line; the outer/inner strides
// position each line within src and dest independently.
template <typename Dtype>
__global__ void copy_kernel(const int n, const int height, const int width,
    const int src_outer_stride, const int src_inner_stride,
    const int dest_outer_stride, const int dest_inner_stride,
    const Dtype* src, Dtype* dest) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decompose the flat line index into (outer slab, row within slab).
    int src_start = index / height * src_outer_stride
        + index % height * src_inner_stride;
    int dest_start = index / height * dest_outer_stride
        + index % height * dest_inner_stride;
    // Copy one contiguous line of `width` elements.
    for (int i = 0; i < width; ++i) {
      dest[dest_start + i] = src[src_start + i];
    }
  }
}
/**
 * @brief Recursively copy the cropped region between bottom[0] and top[0].
 *
 * Walks the leading axes one index at a time; once only the last two axes
 * remain, launches copy_kernel to move that 2-D slab in one shot.
 *
 * @param offsets    per-axis crop offsets into the bottom blob
 * @param indices    current index along each already-fixed leading axis
 * @param cur_dim    axis currently being iterated
 * @param src_data   NOTE(review): unused in the leaf branch — the pointers
 *                   are recomputed from the blobs there; kept for symmetry
 * @param dest_data  see src_data
 * @param is_forward true: bottom data -> top data; false: top diff -> bottom diff
 */
template <typename Dtype>
void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
             const vector<Blob<Dtype>*>& top,
             const vector<int>& offsets,
             vector<int> indices,
             int cur_dim,
             const Dtype* src_data,
             Dtype* dest_data,
             bool is_forward) {
  if (cur_dim + 2 < top[0]->num_axes()) {
    // We are not yet at the final dimension, call copy recursivley
    for (int i = 0; i < top[0]->shape(cur_dim); ++i) {
      indices[cur_dim] = i;
      crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1,
                    src_data, dest_data, is_forward);
    }
  } else {
    // We are at the last two dimensions, which are stored continously in memory
    // With (N,C,H,W)
    //      (0,1,2,3) cur_dim -> H
    //                cur_dim+1 -> W
    const int lines = top[0]->shape(cur_dim);   // one kernel "line" per row
    const int height = top[0]->shape(cur_dim);  // same value as `lines`
    const int width = top[0]->shape(cur_dim+1);
    // Source index = destination index shifted by the crop offsets.
    std::vector<int> ind_off(cur_dim+2, 0);
    for (int j = 0; j < cur_dim; ++j) {
      ind_off[j] = indices[j] + offsets[j];
    }
    ind_off[cur_dim] = offsets[cur_dim];
    ind_off[cur_dim+1] = offsets[cur_dim+1];
    // Compute copy strides
    const int src_outer_stride =
        bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1);
    const int src_inner_stride = bottom[0]->shape(cur_dim+1);
    const int dest_outer_stride =
        top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1);
    const int dest_inner_stride = top[0]->shape(cur_dim+1);
    if (is_forward) {
      // Forward: copy data from the offset position in bottom into top.
      const Dtype* bottom_data = bottom[0]->gpu_data() +
          bottom[0]->offset(ind_off);
      Dtype* top_data = top[0]->mutable_gpu_data() +
          top[0]->offset(indices);
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          lines, height, width,
          src_outer_stride, src_inner_stride,
          dest_outer_stride, dest_inner_stride,
          bottom_data, top_data);
    } else {
      // Backward: scatter top's diff back into the offset window of bottom,
      // with the src/dest stride roles swapped accordingly.
      const Dtype* top_diff = top[0]->gpu_diff() +
          top[0]->offset(indices);
      Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() +
          bottom[0]->offset(ind_off);
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          lines, height, width,
          dest_outer_stride, dest_inner_stride,
          src_outer_stride, src_inner_stride,
          top_diff, bottom_diff);
    }
  }
}
// Forward: copy the cropped window of bottom[0]'s data into top[0],
// starting the recursion at axis 0 with all indices zero.
template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  std::vector<int> indices(top[0]->num_axes(), 0);
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true);
}
// Backward: zero bottom[0]'s diff, then copy top[0]'s diff back into the
// cropped window (elements outside the window keep zero gradient).
template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  if (propagate_down[0]) {
    caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
    std::vector<int> indices(top[0]->num_axes(), 0);
    crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff,
                  false);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe
| cd520e06c868e6b72c8f7a5b4d368356694248de.cu | #include <vector>
#include "caffe/crop_layer.hpp"
namespace caffe {
// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
template <typename Dtype>
__global__ void copy_kernel(const int n, const int height, const int width,
const int src_outer_stride, const int src_inner_stride,
const int dest_outer_stride, const int dest_inner_stride,
const Dtype* src, Dtype* dest) {
CUDA_KERNEL_LOOP(index, n) {
int src_start = index / height * src_outer_stride
+ index % height * src_inner_stride;
int dest_start = index / height * dest_outer_stride
+ index % height * dest_inner_stride;
for (int i = 0; i < width; ++i) {
dest[dest_start + i] = src[src_start + i];
}
}
}
template <typename Dtype>
void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const vector<int>& offsets,
vector<int> indices,
int cur_dim,
const Dtype* src_data,
Dtype* dest_data,
bool is_forward) {
if (cur_dim + 2 < top[0]->num_axes()) {
// We are not yet at the final dimension, call copy recursivley
for (int i = 0; i < top[0]->shape(cur_dim); ++i) {
indices[cur_dim] = i;
crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1,
src_data, dest_data, is_forward);
}
} else {
// We are at the last two dimensions, which are stored continously in memory
// With (N,C,H,W)
// (0,1,2,3) cur_dim -> H
// cur_dim+1 -> W
const int lines = top[0]->shape(cur_dim);
const int height = top[0]->shape(cur_dim);
const int width = top[0]->shape(cur_dim+1);
std::vector<int> ind_off(cur_dim+2, 0);
for (int j = 0; j < cur_dim; ++j) {
ind_off[j] = indices[j] + offsets[j];
}
ind_off[cur_dim] = offsets[cur_dim];
ind_off[cur_dim+1] = offsets[cur_dim+1];
// Compute copy strides
const int src_outer_stride =
bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1);
const int src_inner_stride = bottom[0]->shape(cur_dim+1);
const int dest_outer_stride =
top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1);
const int dest_inner_stride = top[0]->shape(cur_dim+1);
if (is_forward) {
const Dtype* bottom_data = bottom[0]->gpu_data() +
bottom[0]->offset(ind_off);
Dtype* top_data = top[0]->mutable_gpu_data() +
top[0]->offset(indices);
// NOLINT_NEXT_LINE(whitespace/operators)
copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
lines, height, width,
src_outer_stride, src_inner_stride,
dest_outer_stride, dest_inner_stride,
bottom_data, top_data);
} else {
const Dtype* top_diff = top[0]->gpu_diff() +
top[0]->offset(indices);
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() +
bottom[0]->offset(ind_off);
// NOLINT_NEXT_LINE(whitespace/operators)
copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
lines, height, width,
dest_outer_stride, dest_inner_stride,
src_outer_stride, src_inner_stride,
top_diff, bottom_diff);
}
}
}
template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
std::vector<int> indices(top[0]->num_axes(), 0);
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true);
}
template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (propagate_down[0]) {
caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
std::vector<int> indices(top[0]->num_axes(), 0);
crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff,
false);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe
|
804625975373f3b63d562b750356c2650f67f326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/transpose_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data,
Dtype* to_data, const int* from_counts, const int* to_counts,
const int* map, const int num_axes, int* buf) {
CUDA_KERNEL_LOOP(index, nthreads) {
int* from_inds = buf + index * num_axes;
int from_index = index, to_index = 0;
for (int i = 0; i < num_axes; i++) {
from_inds[i] = from_index / from_counts[i];
from_index = from_index % from_counts[i];
}
for (int i = 0; i < num_axes; i++) {
to_index += from_inds[map[i]] * to_counts[i];
}
*(to_data + to_index) = *(from_data + index);
}
}
template <typename Dtype>
void TransposeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(),
bottom_counts_.gpu_data(), top_counts_.gpu_data(),
forward_map_.gpu_data(), bottom[0]->shape().size(),
buf_.mutable_gpu_data());
}
template <typename Dtype>
void TransposeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(),
top_counts_.gpu_data(), bottom_counts_.gpu_data(),
backward_map_.gpu_data(), bottom[0]->shape().size(),
buf_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FUNCS(TransposeLayer);
} // namespace caffe
| 804625975373f3b63d562b750356c2650f67f326.cu | #include <vector>
#include "caffe/layers/transpose_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data,
Dtype* to_data, const int* from_counts, const int* to_counts,
const int* map, const int num_axes, int* buf) {
CUDA_KERNEL_LOOP(index, nthreads) {
int* from_inds = buf + index * num_axes;
int from_index = index, to_index = 0;
for (int i = 0; i < num_axes; i++) {
from_inds[i] = from_index / from_counts[i];
from_index = from_index % from_counts[i];
}
for (int i = 0; i < num_axes; i++) {
to_index += from_inds[map[i]] * to_counts[i];
}
*(to_data + to_index) = *(from_data + index);
}
}
template <typename Dtype>
void TransposeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(),
bottom_counts_.gpu_data(), top_counts_.gpu_data(),
forward_map_.gpu_data(), bottom[0]->shape().size(),
buf_.mutable_gpu_data());
}
template <typename Dtype>
void TransposeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(),
top_counts_.gpu_data(), bottom_counts_.gpu_data(),
backward_map_.gpu_data(), bottom[0]->shape().size(),
buf_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FUNCS(TransposeLayer);
} // namespace caffe
|
511f7374d965a03bd14680e59dcf2d5e57aeed2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcAdvectionWHA.cu
*
* Created on: 16-04-2013
* Author: Kamil Szewc (kamil.szewc@gmail.com)
*/
#include "../../sph.h"
__global__ void calcAdvectionWHA(Particle *p, Parameters *par)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < par->N) {
p[tid].vel.x += par->DT * ((1.0 - par->XSPH)*p[tid].rh_vel.x + par->XSPH*p[tid].rh_pos.x + p[tid].st.x + par->G_X);
p[tid].vel.y += par->DT * ((1.0 - par->XSPH)*p[tid].rh_vel.y + par->XSPH*p[tid].rh_pos.y + p[tid].st.y + par->G_Y);
p[tid].vel.z += par->DT * ((1.0 - par->XSPH)*p[tid].rh_vel.z + par->XSPH*p[tid].rh_pos.z + p[tid].st.z + par->G_Z);
p[tid].pos.x += par->DT * p[tid].vel.x;
p[tid].pos.y += par->DT * p[tid].vel.y;
p[tid].pos.z += par->DT * p[tid].vel.z;
if (par->T_BOUNDARY_PERIODICITY == 0) //X,Y
{
if (p[tid].pos.x > par->XCV)
{
p[tid].vel.x = -p[tid].vel.x;
p[tid].pos.x = 2.0 * par->XCV - p[tid].pos.x;
}
if (p[tid].pos.x <= 0.0)
{
p[tid].vel.x = -p[tid].vel.x;
p[tid].pos.x = -p[tid].pos.x;
}
if (p[tid].pos.y > par->YCV)
{
p[tid].vel.y = -p[tid].vel.y;
p[tid].pos.y = 2.0 * par->YCV - p[tid].pos.y;
}
if (p[tid].pos.y <= 0.0)
{
p[tid].vel.y = -p[tid].vel.y;
p[tid].pos.y = -p[tid].pos.y;
}
}
else
{
if (p[tid].pos.x > par->XCV) p[tid].pos.x -= par->XCV;
if (p[tid].pos.x <= 0.0) p[tid].pos.x += par->XCV;
if (p[tid].pos.y > par->YCV) p[tid].pos.y -= par->YCV;
if (p[tid].pos.y <= 0.0) p[tid].pos.y += par->YCV;
}
if ((par->T_BOUNDARY_PERIODICITY == 0) || (par->T_BOUNDARY_PERIODICITY == 2)) //Z
{
if (p[tid].pos.z > par->ZCV)
{
p[tid].vel.z = -p[tid].vel.z;
p[tid].pos.z = 2.0 * par->ZCV - p[tid].pos.z;
}
if (p[tid].pos.z <= 0.0)
{
p[tid].vel.z = -p[tid].vel.z;
p[tid].pos.z = -p[tid].pos.z;
}
}
else
{
if (p[tid].pos.z > par->ZCV) p[tid].pos.z -= par->ZCV;
if (p[tid].pos.z <= 0.0) p[tid].pos.z += par->ZCV;
}
tid += blockDim.x * gridDim.x;
}
}
| 511f7374d965a03bd14680e59dcf2d5e57aeed2c.cu | /*
* calcAdvectionWHA.cu
*
* Created on: 16-04-2013
* Author: Kamil Szewc (kamil.szewc@gmail.com)
*/
#include "../../sph.h"
__global__ void calcAdvectionWHA(Particle *p, Parameters *par)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < par->N) {
p[tid].vel.x += par->DT * ((1.0 - par->XSPH)*p[tid].rh_vel.x + par->XSPH*p[tid].rh_pos.x + p[tid].st.x + par->G_X);
p[tid].vel.y += par->DT * ((1.0 - par->XSPH)*p[tid].rh_vel.y + par->XSPH*p[tid].rh_pos.y + p[tid].st.y + par->G_Y);
p[tid].vel.z += par->DT * ((1.0 - par->XSPH)*p[tid].rh_vel.z + par->XSPH*p[tid].rh_pos.z + p[tid].st.z + par->G_Z);
p[tid].pos.x += par->DT * p[tid].vel.x;
p[tid].pos.y += par->DT * p[tid].vel.y;
p[tid].pos.z += par->DT * p[tid].vel.z;
if (par->T_BOUNDARY_PERIODICITY == 0) //X,Y
{
if (p[tid].pos.x > par->XCV)
{
p[tid].vel.x = -p[tid].vel.x;
p[tid].pos.x = 2.0 * par->XCV - p[tid].pos.x;
}
if (p[tid].pos.x <= 0.0)
{
p[tid].vel.x = -p[tid].vel.x;
p[tid].pos.x = -p[tid].pos.x;
}
if (p[tid].pos.y > par->YCV)
{
p[tid].vel.y = -p[tid].vel.y;
p[tid].pos.y = 2.0 * par->YCV - p[tid].pos.y;
}
if (p[tid].pos.y <= 0.0)
{
p[tid].vel.y = -p[tid].vel.y;
p[tid].pos.y = -p[tid].pos.y;
}
}
else
{
if (p[tid].pos.x > par->XCV) p[tid].pos.x -= par->XCV;
if (p[tid].pos.x <= 0.0) p[tid].pos.x += par->XCV;
if (p[tid].pos.y > par->YCV) p[tid].pos.y -= par->YCV;
if (p[tid].pos.y <= 0.0) p[tid].pos.y += par->YCV;
}
if ((par->T_BOUNDARY_PERIODICITY == 0) || (par->T_BOUNDARY_PERIODICITY == 2)) //Z
{
if (p[tid].pos.z > par->ZCV)
{
p[tid].vel.z = -p[tid].vel.z;
p[tid].pos.z = 2.0 * par->ZCV - p[tid].pos.z;
}
if (p[tid].pos.z <= 0.0)
{
p[tid].vel.z = -p[tid].vel.z;
p[tid].pos.z = -p[tid].pos.z;
}
}
else
{
if (p[tid].pos.z > par->ZCV) p[tid].pos.z -= par->ZCV;
if (p[tid].pos.z <= 0.0) p[tid].pos.z += par->ZCV;
}
tid += blockDim.x * gridDim.x;
}
}
|
0c2cd9452088422e73dab18a102d9a14f4d39652.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_db.cuh"
#include "../include/dervfields_db.cuh"
__device__ __host__
real dbsourcerho (real *dw, real *wd, real *w, struct params *p,int *ii) {
real src=0;
return src;
}
__device__ __host__
real dbsourcemom (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
real src=0;
switch(direction)
{
case 0:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,b1)];
break;
case 1:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,b2)];
break;
#ifdef USE_SAC_3D
case 2:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,b3)];
break;
#endif
}
return(isnan(src)?0:src);
}
__device__ __host__
real dbsourceb (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
real src=0;
switch(direction)
{
#ifdef USE_SAC
case 0:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,mom1)]/(w[fencode3_db(p,ii,rho)]+w[fencode3_db(p,ii,rhob)]);
break;
case 1:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,mom2)]/(w[fencode3_db(p,ii,rho)]+w[fencode3_db(p,ii,rhob)]);
break;
#endif
#ifdef USE_SAC_3D
case 2:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,mom3)]/(w[fencode3_db(p,ii,rho)]+w[fencode3_db(p,ii,rhob)]);
break;
#endif
}
return(isnan(src)?0:src);
}
__device__ __host__
real dbsourceenergy (real *dw, real *wd, real *w, struct params *p,int *ii) {
real src=0;
src= -wd[fencode3_db(p,ii,divb)]*wd[fencode3_db(p,ii,bdotv)];
return ( src);
}
__device__ __host__
int dbderivsourcerho (real *dw, real *wd, real *w, struct params *p,int *ii) {
int status=0;
int field=rho;
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourcerho(dw,wd,w,p,ii);
//dw[fencode3_db(p,ii,field)]=w[fencode3_db(p,ii,field)]+10;
return ( status);
}
__device__ __host__
int dbderivsourcemom (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
int status=0;
//dw[fencode3_db(p,ii,field)]=w[fencode3_db(p,ii,field)]+20+5*(2*direction+1);
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourcemom(dw,wd,w,p,ii,field,direction);
//dw[fencode3_db(p,ii,field)]=-ddotcurrentmom(dw,wd,w,p,ii,field,direction);
return ( status);
}
__device__ __host__
int dbderivsourceb (real *dw, real *wd, real *w, struct params *p,int *ii, int field, int direction) {
int status=0;
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourceb(dw,wd,w,p,ii,field,direction);
return ( status);
}
__device__ __host__
int dbderivsourceenergy (real *dw, real *wd, real *w, struct params *p,int *ii) {
int status=0;
int field=energy;
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourceenergy(dw,wd,w,p,ii);
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void dbderivsource (real *dw, real *wd, real *w, struct params *p,int *ii, int field) {
//int status=0;
switch(field)
{
case rho:
dbderivsourcerho(dw,wd,w,p,ii);
break;
case mom1:
dbderivsourcemom(dw,wd,w,p,ii,field,0);
break;
case mom2:
dbderivsourcemom(dw,wd,w,p,ii,field,1);
break;
#ifdef USE_SAC_3D
case mom3:
dbderivsourcemom(dw,wd,w,p,ii,field,2);
break;
#endif
case energy:
dbderivsourceenergy(dw,wd,w,p,ii);
break;
case b1:
dbderivsourceb(dw,wd,w,p,ii,field,0);
break;
case b2:
dbderivsourceb(dw,wd,w,p,ii,field,1);
break;
#ifdef USE_SAC_3D
case b3:
dbderivsourceb(dw,wd,w,p,ii,field,2);
break;
#endif
}
//return ( status);
}
__global__ void divb_parallel(struct params *p, real *w, real *wmod,
real *dwn1, real *wd, int order,int ordero, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int ni=p->n[0];
int nj=p->n[1];
int ip,jp,ipg,jpg;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk,kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
int shift=order*NVAR*dimp;
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
iia[0]=ip*(p->npgp[0])+ipg;
iia[1]=jp*(p->npgp[1])+jpg;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp*(p->npgp[2])+kpg;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
for(int f=rho; f<=b2; f++)
dwn1[fencode3_db(p,iia,f)]=0;
}
__syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
iia[0]=ip*(p->npgp[0])+ipg;
iia[1]=jp*(p->npgp[1])+jpg;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp*(p->npgp[2])+kpg;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>2 && j>2 && k>2 && i<(ni-2) && j<(nj-2) && k<(nk-2))
#else
if(i>2 && j>2 && i<(ni-2) && j<(nj-2))
#endif
//if(i>2 && j>2 && i<(ni-2) && j<(nj-2))
{
if(p->divbfix)
{
wd[fencode3_db(p,iia,divb)]=grad3d_db(wmod+order*NVAR*dimp,p,iia,b1,0)+grad3d_db(wmod+order*NVAR*dimp,p,iia,b2,1);
#ifdef USE_SAC
wd[fencode3_db(p,iia,divb)]+=grad3d_db(wmod+order*NVAR*dimp,p,iia,b1b,0)+grad3d_db(wmod+order*NVAR*dimp,p,iia,b2b,1);
#endif
#ifdef USE_SAC_3D
wd[fencode3_db(p,iia,divb)]+=grad3d_db(wmod+order*NVAR*dimp,p,iia,b3,0)+grad3d_db(wmod+order*NVAR*dimp,p,iia,b3b,1);
#endif
for(int f=rho; f<=b2; f++)
{
dbderivsource(dwn1,wd,wmod+order*NVAR*dimp,p,iia,f);
}
}
}
}
__syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
iia[0]=ip*(p->npgp[0])+ipg;
iia[1]=jp*(p->npgp[1])+jpg;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp*(p->npgp[2])+kpg;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>1 && j >1 && k>1 && i<(ni-2) && j<(nj-2) && k<(nk-2))
#else
if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
#endif
// if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
{
if(p->divbfix)
{
for(int f=rho; f<=b2; f++)
// - sign here same as vac maybe a +
wmod[fencode3_db(p,iia,f)+(ordero*NVAR*dimp)]=wmod[fencode3_db(p,iia,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_db(p,iia,f)];
}
}
// }
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_db(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cudivb(struct params **p, struct params **d_p, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt)
{
int status=0;
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
hipLaunchKernelGGL(( divb_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt);
//printf("called update\n");
hipDeviceSynchronize();
return status;
}
| 0c2cd9452088422e73dab18a102d9a14f4d39652.cu | #include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_db.cuh"
#include "../include/dervfields_db.cuh"
__device__ __host__
real dbsourcerho (real *dw, real *wd, real *w, struct params *p,int *ii) {
real src=0;
return src;
}
__device__ __host__
real dbsourcemom (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
real src=0;
switch(direction)
{
case 0:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,b1)];
break;
case 1:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,b2)];
break;
#ifdef USE_SAC_3D
case 2:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,b3)];
break;
#endif
}
return(isnan(src)?0:src);
}
__device__ __host__
real dbsourceb (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
real src=0;
switch(direction)
{
#ifdef USE_SAC
case 0:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,mom1)]/(w[fencode3_db(p,ii,rho)]+w[fencode3_db(p,ii,rhob)]);
break;
case 1:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,mom2)]/(w[fencode3_db(p,ii,rho)]+w[fencode3_db(p,ii,rhob)]);
break;
#endif
#ifdef USE_SAC_3D
case 2:
src= -wd[fencode3_db(p,ii,divb)]*w[fencode3_db(p,ii,mom3)]/(w[fencode3_db(p,ii,rho)]+w[fencode3_db(p,ii,rhob)]);
break;
#endif
}
return(isnan(src)?0:src);
}
__device__ __host__
real dbsourceenergy (real *dw, real *wd, real *w, struct params *p,int *ii) {
real src=0;
src= -wd[fencode3_db(p,ii,divb)]*wd[fencode3_db(p,ii,bdotv)];
return ( src);
}
__device__ __host__
int dbderivsourcerho (real *dw, real *wd, real *w, struct params *p,int *ii) {
int status=0;
int field=rho;
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourcerho(dw,wd,w,p,ii);
//dw[fencode3_db(p,ii,field)]=w[fencode3_db(p,ii,field)]+10;
return ( status);
}
__device__ __host__
int dbderivsourcemom (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
int status=0;
//dw[fencode3_db(p,ii,field)]=w[fencode3_db(p,ii,field)]+20+5*(2*direction+1);
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourcemom(dw,wd,w,p,ii,field,direction);
//dw[fencode3_db(p,ii,field)]=-ddotcurrentmom(dw,wd,w,p,ii,field,direction);
return ( status);
}
__device__ __host__
int dbderivsourceb (real *dw, real *wd, real *w, struct params *p,int *ii, int field, int direction) {
int status=0;
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourceb(dw,wd,w,p,ii,field,direction);
return ( status);
}
__device__ __host__
int dbderivsourceenergy (real *dw, real *wd, real *w, struct params *p,int *ii) {
int status=0;
int field=energy;
dw[fencode3_db(p,ii,field)]=dw[fencode3_db(p,ii,field)]+dbsourceenergy(dw,wd,w,p,ii);
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void dbderivsource (real *dw, real *wd, real *w, struct params *p,int *ii, int field) {
//int status=0;
switch(field)
{
case rho:
dbderivsourcerho(dw,wd,w,p,ii);
break;
case mom1:
dbderivsourcemom(dw,wd,w,p,ii,field,0);
break;
case mom2:
dbderivsourcemom(dw,wd,w,p,ii,field,1);
break;
#ifdef USE_SAC_3D
case mom3:
dbderivsourcemom(dw,wd,w,p,ii,field,2);
break;
#endif
case energy:
dbderivsourceenergy(dw,wd,w,p,ii);
break;
case b1:
dbderivsourceb(dw,wd,w,p,ii,field,0);
break;
case b2:
dbderivsourceb(dw,wd,w,p,ii,field,1);
break;
#ifdef USE_SAC_3D
case b3:
dbderivsourceb(dw,wd,w,p,ii,field,2);
break;
#endif
}
//return ( status);
}
__global__ void divb_parallel(struct params *p, real *w, real *wmod,
real *dwn1, real *wd, int order,int ordero, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int ni=p->n[0];
int nj=p->n[1];
int ip,jp,ipg,jpg;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk,kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
int shift=order*NVAR*dimp;
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
iia[0]=ip*(p->npgp[0])+ipg;
iia[1]=jp*(p->npgp[1])+jpg;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp*(p->npgp[2])+kpg;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
for(int f=rho; f<=b2; f++)
dwn1[fencode3_db(p,iia,f)]=0;
}
__syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
iia[0]=ip*(p->npgp[0])+ipg;
iia[1]=jp*(p->npgp[1])+jpg;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp*(p->npgp[2])+kpg;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>2 && j>2 && k>2 && i<(ni-2) && j<(nj-2) && k<(nk-2))
#else
if(i>2 && j>2 && i<(ni-2) && j<(nj-2))
#endif
//if(i>2 && j>2 && i<(ni-2) && j<(nj-2))
{
if(p->divbfix)
{
wd[fencode3_db(p,iia,divb)]=grad3d_db(wmod+order*NVAR*dimp,p,iia,b1,0)+grad3d_db(wmod+order*NVAR*dimp,p,iia,b2,1);
#ifdef USE_SAC
wd[fencode3_db(p,iia,divb)]+=grad3d_db(wmod+order*NVAR*dimp,p,iia,b1b,0)+grad3d_db(wmod+order*NVAR*dimp,p,iia,b2b,1);
#endif
#ifdef USE_SAC_3D
wd[fencode3_db(p,iia,divb)]+=grad3d_db(wmod+order*NVAR*dimp,p,iia,b3,0)+grad3d_db(wmod+order*NVAR*dimp,p,iia,b3b,1);
#endif
for(int f=rho; f<=b2; f++)
{
dbderivsource(dwn1,wd,wmod+order*NVAR*dimp,p,iia,f);
}
}
}
}
__syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
iia[0]=ip*(p->npgp[0])+ipg;
iia[1]=jp*(p->npgp[1])+jpg;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp*(p->npgp[2])+kpg;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>1 && j >1 && k>1 && i<(ni-2) && j<(nj-2) && k<(nk-2))
#else
if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
#endif
// if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
{
if(p->divbfix)
{
for(int f=rho; f<=b2; f++)
// - sign here same as vac maybe a +
wmod[fencode3_db(p,iia,f)+(ordero*NVAR*dimp)]=wmod[fencode3_db(p,iia,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_db(p,iia,f)];
}
}
// }
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_db(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cudivb(struct params **p, struct params **d_p, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt)
{
int status=0;
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
divb_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt);
//printf("called update\n");
cudaThreadSynchronize();
return status;
}
|
00e12431890a615ace2381b841f09432fe779865.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#if TORCH_HIP_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
ray r;
float theta = fov.x*PI/180.0f;
float phi = fov.y*PI/180.0f;
glm::vec3 A = glm::cross(view,up);
glm::vec3 B = glm::cross(A,view);
glm::vec3 M = eye + view;
glm::vec3 H = glm::normalize(A)*glm::length(view)*tan(theta);
glm::vec3 V = glm::normalize(B)*glm::length(view)*tan(phi);
float sx = (float)x/(resolution.x-1);
float sy = 1.0 - (float)y/(resolution.y-1);
glm::vec3 P = M + (2*sx-1)*H + (2*sy - 1)*V;
r.origin = eye;
r.direction = glm::normalize(P-eye);
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
__host__ __device__ int findNearestGeometricIntersection(ray& r, glm::vec3& intersectionPoint,
glm::vec3& intersectionNormal,
staticGeom* geoms, int numberOfGeoms)
{
int nearestIntersectionObject = -1;
float nearestIntersectionDist = FLT_MAX;
for(int i=0; i<numberOfGeoms;++i)
{
if(geoms[i].type == SPHERE)
{
glm::vec3 iPoint;
glm::vec3 iNormal;
float t = sphereIntersectionTest(geoms[i],r,iPoint,iNormal);
if (t!= -1 && t<nearestIntersectionDist)
{
nearestIntersectionObject = i;
nearestIntersectionDist = t;
intersectionPoint = iPoint;
intersectionNormal = iNormal;
}
}
if(geoms[i].type == CUBE)
{
glm::vec3 iPoint;
glm::vec3 iNormal;
float t = boxIntersectionTest(geoms[i],r,iPoint,iNormal);
if (t!= -1 && t<nearestIntersectionDist)
{
nearestIntersectionObject = i;
nearestIntersectionDist = t;
intersectionPoint = iPoint;
intersectionNormal = iNormal;
}
}
}
return nearestIntersectionObject;
}
// Direct-lighting shade with a single hard-coded point light at (0,10,20).
// Returns diffuse (Kd) + specular (Ks) contribution for shadePoint, or black
// if the shadow feeler hits any geometry on its way to the light.
// NOTE(review): shadowFeeler.direction is NOT normalized before the dot
// product below, so LN scales with the distance to the light before being
// clamped -- looks unintentional; confirm against the kernel-side shading.
__host__ __device__ glm::vec3 shade(material& mtl, glm::vec3& shadePoint, glm::vec3& shadeNormal,
glm::vec3 eye,staticGeom* geoms,int numberOfGeoms)
{
glm::vec3 lightPos( 0,10,20);   // hard-coded single white light
glm::vec3 lightCol(1,1,1);      // NOTE(review): unused below
int numberOfLights = 1;
float Kd = 0.6;  // diffuse weight
float Ks = 0.2;  // specular weight
glm::vec3 color(0,0,0);
for (int i=0;i<numberOfLights;++i)
{
ray shadowFeeler;
shadowFeeler.direction = lightPos - shadePoint;
// Offset the origin along the feeler to avoid self-intersection.
shadowFeeler.origin = shadePoint+ (float)RAY_BIAS_AMOUNT*shadowFeeler.direction;
glm::vec3 intersectionPoint,intersectionNormal;
int intersectionObjIndex = findNearestGeometricIntersection(shadowFeeler,intersectionPoint,
intersectionNormal,
geoms,numberOfGeoms);
// Any hit at all counts as occlusion (no distance-to-light comparison here).
if (intersectionObjIndex != -1)
continue;
float LN = glm::dot(shadowFeeler.direction,shadeNormal);
LN = utilityCore::clamp(LN,0,1);
// Phong: reflect the light direction about the normal, compare to view dir.
glm::vec3 Rj = glm::normalize(glm::reflect(-shadowFeeler.direction,shadeNormal));
glm::vec3 V = glm::normalize(eye-shadePoint);
float RjV = glm::dot(Rj,V);
RjV = utilityCore::clamp(RjV,0,1);
color+= (Kd*mtl.color*LN + Ks*mtl.specularColor*(powf(RjV,mtl.specularExponent)));
}
return color;
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
// Core raytracer kernel: one thread per pixel. Casts a primary ray, finds the
// nearest sphere/box hit, accumulates direct lighting (Lambert diffuse +
// Phong-style specular) using per-light shadow rays, and follows mirror
// bounces up to rayDepth, writing the final color to colors[index].
__global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
                            staticGeom* geoms, int numberOfGeoms, material* mtls, int numberOfMaterials,
                            light* lights, int numberOfLights){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict < bounds check: the original `<=` allowed the row/column just past
  // the image edge through, producing an out-of-bounds write to colors[].
  if(x < resolution.x && y < resolution.y){
    ray r = raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov);
    glm::vec3 color(0,0,0);
    float reflContribution = 1.0f;  // energy fraction carried by the current bounce
    for(int depth=0; depth<rayDepth; ++depth)
    {
      // ---- nearest-intersection search along r ----
      glm::vec3 intersectionPoint;
      glm::vec3 intersectionNormal;
      int nearestIntersectionObject = -1;
      float nearestIntersectionDist = FLT_MAX;
      for(int g=0; g<numberOfGeoms; ++g)
      {
        glm::vec3 iPoint;
        glm::vec3 iNormal;
        float t = -1;
        if(geoms[g].type == SPHERE)
          t = sphereIntersectionTest(geoms[g], r, iPoint, iNormal);
        else if(geoms[g].type == CUBE)
          t = boxIntersectionTest(geoms[g], r, iPoint, iNormal);
        if (t != -1 && t < nearestIntersectionDist)
        {
          nearestIntersectionObject = g;
          nearestIntersectionDist = t;
          intersectionPoint = iPoint;
          intersectionNormal = iNormal;
        }
      }
      // Ray escaped the scene: stop bouncing. (Without this guard the
      // original code indexed geoms[-1] when nothing was hit -- an
      // out-of-bounds read.)
      if (nearestIntersectionObject == -1)
        break;
      material mtl = mtls[geoms[nearestIntersectionObject].materialid];
      // ---- direct lighting with shadow rays ----
      for (int i=0; i<numberOfLights; ++i)
      {
        ray shadowFeeler;
        glm::vec3 lightPos = lights[i].position;
        glm::vec3 lightCol = lights[i].color;  // NOTE(review): unused -- lights act as white
        float lightIntensity = lights[i].intensity;
        glm::vec3 ptToLight = lightPos - intersectionPoint;
        shadowFeeler.direction = glm::normalize(ptToLight);
        // Bias the origin off the surface to avoid self-shadowing.
        shadowFeeler.origin = intersectionPoint + (float)RAY_BIAS_AMOUNT*shadowFeeler.direction;
        bool occluded = false;
        float distSquared = ptToLight.x*ptToLight.x +
                            ptToLight.y*ptToLight.y +
                            ptToLight.z*ptToLight.z;
        // Occlusion test: any geometry strictly closer than the light blocks it.
        for(int g=0; g<numberOfGeoms; ++g)
        {
          glm::vec3 iPoint;
          glm::vec3 iNormal;
          float t = -1;
          if(geoms[g].type == SPHERE)
            t = sphereIntersectionTest(geoms[g], shadowFeeler, iPoint, iNormal);
          else if(geoms[g].type == CUBE)
            t = boxIntersectionTest(geoms[g], shadowFeeler, iPoint, iNormal);
          if (t != -1)
          {
            glm::vec3 occPoint = getPointOnRay(shadowFeeler, t);
            glm::vec3 ptToOcc = occPoint - shadowFeeler.origin;
            float dsq = ptToOcc.x*ptToOcc.x +
                        ptToOcc.y*ptToOcc.y +
                        ptToOcc.z*ptToOcc.z;
            if (dsq < distSquared)
            {
              occluded = true;
              break;
            }
          }
        }
        if(occluded)
          continue;
        // Lambert term, clamped to [0,1].
        float LN = glm::dot(shadowFeeler.direction, intersectionNormal);
        LN = max(LN, 0.0f);
        LN = min(LN, 1.0f);
        // Phong specular: reflect the light direction about the normal and
        // compare with the view direction.
        glm::vec3 reflectedDir = calculateReflectionDirection(intersectionNormal, -shadowFeeler.direction);
        glm::vec3 Rj = glm::normalize(reflectedDir);
        glm::vec3 V = glm::normalize(cam.position - intersectionPoint);
        float RjV = glm::dot(Rj, V);
        RjV = max(RjV, 0.0f);
        RjV = min(RjV, 1.0f);
        glm::vec3 diffColor = mtl.diffuseCoefficient*lightIntensity*LN*mtl.color;
        glm::vec3 specColor = mtl.specularCoefficient*mtl.specularColor*(powf(RjV, mtl.specularExponent));
        glm::vec3 localColor = diffColor + specColor;
        if(!mtl.hasReflective)
        {
          color += reflContribution*localColor;
          break;  // opaque diffuse surface: no further bounces
        }
        else
        {
          // NOTE(review): the bounce reuses the reflected *light* direction
          // rather than the reflected view/ray direction, and r.origin is
          // not biased off the surface -- both look suspicious; confirm
          // intent before changing.
          color += reflContribution*(1.0f - mtl.specularCoefficient)*localColor;
          reflContribution *= mtl.specularCoefficient;
          r.direction = reflectedDir;
          r.origin = intersectionPoint;
        }
      }
    }
    colors[index] = color;
  }
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Wrapper for the __global__ calls: packs the scene for the requested frame,
// copies it to the device, launches the raytrace and PBO-write kernels,
// copies the rendered image back, and releases all device buffers.
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, light* lights, int numberOfLights){
  int traceDepth = 1; //determines how many bounces the raytracer traces
  // Launch configuration: 8x8 thread tiles covering the whole image.
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
  //send image to GPU
  glm::vec3* cudaimage = NULL;
  int numPixels = (int)renderCam->resolution.x*(int)renderCam->resolution.y;
  hipMalloc((void**)&cudaimage, numPixels*sizeof(glm::vec3));
  hipMemcpy( cudaimage, renderCam->image, numPixels*sizeof(glm::vec3), hipMemcpyHostToDevice);
  //package geometry (per-frame transforms flattened to staticGeom) and send to GPU
  staticGeom* geomList = new staticGeom[numberOfGeoms];
  for(int i=0; i<numberOfGeoms; i++){
    staticGeom newStaticGeom;
    newStaticGeom.type = geoms[i].type;
    newStaticGeom.materialid = geoms[i].materialid;
    newStaticGeom.translation = geoms[i].translations[frame];
    newStaticGeom.rotation = geoms[i].rotations[frame];
    newStaticGeom.scale = geoms[i].scales[frame];
    newStaticGeom.transform = geoms[i].transforms[frame];
    newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
    geomList[i] = newStaticGeom;
  }
  staticGeom* cudageoms = NULL;
  hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
  hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
  //package materials
  material* cudamtls = NULL;
  hipMalloc( (void**)&cudamtls, numberOfMaterials*sizeof(material));
  hipMemcpy(cudamtls, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);
  //package lights
  light* cudalights = NULL;
  hipMalloc( (void**)&cudalights, numberOfLights*sizeof(light));
  hipMemcpy(cudalights, lights, numberOfLights*sizeof(light), hipMemcpyHostToDevice);
  //package camera (plain-old-data snapshot passed by value to the kernel)
  cameraData cam;
  cam.resolution = renderCam->resolution;
  cam.position = renderCam->positions[frame];
  cam.view = renderCam->views[frame];
  cam.up = renderCam->ups[frame];
  cam.fov = renderCam->fov;
  //kernel launches
  hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudamtls,numberOfMaterials,cudalights,numberOfLights);
  hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage);
  //retrieve image from GPU (this hipMemcpy blocks until the kernels finish)
  hipMemcpy( renderCam->image, cudaimage, numPixels*sizeof(glm::vec3), hipMemcpyDeviceToHost);
  //free up stuff, or else we'll leak memory like a madman
  hipFree( cudaimage );
  hipFree( cudageoms );
  hipFree(cudamtls);
  hipFree(cudalights);
  // Fixed: geomList was allocated with new[], so it must be released with
  // delete[] (plain delete on a new[] array is undefined behavior).
  delete [] geomList;
  // make certain the kernel has completed
  hipDeviceSynchronize();
  checkCUDAError("Kernel failed!");
}
| 00e12431890a615ace2381b841f09432fe779865.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#if CUDA_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
// Aborts the process if the most recent CUDA runtime call left an error
// behind. `msg` is a caller-supplied label included in the stderr diagnostic.
void checkCUDAError(const char *msg) {
  const cudaError_t status = cudaGetLastError();
  if (status == cudaSuccess) {
    return;  // nothing pending -- fast path
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
  exit(EXIT_FAILURE);
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
// Returns a pseudo-random vec3 with each component drawn uniformly from [0,1).
// The thrust engine is seeded from the pixel's flat index scaled by `time`
// (the iteration counter), so each pixel/iteration pair gets its own stream.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);  // flat pixel index (float product truncated back to int)
// `hash` is a project helper -- presumably scrambles the seed; confirm in utilities.
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
// Builds the primary ray through pixel (x, y) using the standard eye/view/up
// image-plane parameterization: the plane sits at eye+view, with half-extents
// determined by the horizontal/vertical half-angles in `fov` (degrees).
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
  ray r;
  float theta = fov.x*PI/180.0f;  // horizontal half-angle, radians
  float phi = fov.y*PI/180.0f;    // vertical half-angle, radians
  glm::vec3 A = glm::cross(view,up);  // image-plane horizontal axis
  glm::vec3 B = glm::cross(A,view);   // image-plane vertical axis
  glm::vec3 M = eye + view;           // image-plane center
  // tanf keeps the trig in single precision on the device.
  glm::vec3 H = glm::normalize(A)*glm::length(view)*tanf(theta);  // half-extent along x
  glm::vec3 V = glm::normalize(B)*glm::length(view)*tanf(phi);    // half-extent along y
  float sx = (float)x/(resolution.x-1);
  // 1.0f (not 1.0): the double literal forced this expression to double
  // precision before truncating back to float.
  float sy = 1.0f - (float)y/(resolution.y-1);  // flip so y grows downward in the image
  glm::vec3 P = M + (2*sx-1)*H + (2*sy - 1)*V;  // world-space point on the image plane
  r.origin = eye;
  r.direction = glm::normalize(P-eye);
  return r;
}
//Kernel that blacks out a given image buffer
// Kernel that blacks out a given image buffer. One thread per pixel.
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict < bounds check: valid coordinates are 0..resolution-1. The
  // original `<=` let the thread at x==resolution.x / y==resolution.y
  // write one element past the end of image[].
  if(x < resolution.x && y < resolution.y){
    image[index] = glm::vec3(0,0,0);
  }
}
//Kernel that writes the image to the OpenGL PBO directly.
// Converts the accumulated float image to 8-bit RGBA and writes it into the
// OpenGL pixel buffer object. One thread per pixel; launched on a 2D grid
// covering the full image resolution.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict < bounds check: valid pixels are 0..resolution-1. The original
  // `<=` let the first out-of-range row/column through, reading and writing
  // one element past the end of image[] / PBOpos[].
  if(x < resolution.x && y < resolution.y){
    // Scale [0,1] color to [0,255]; 255.0f keeps the math in single precision
    // on the device (a bare 255.0 is a double literal).
    glm::vec3 color = image[index] * 255.0f;
    // Clamp overflow from HDR accumulation.
    if(color.x > 255.0f) color.x = 255.0f;
    if(color.y > 255.0f) color.y = 255.0f;
    if(color.z > 255.0f) color.z = 255.0f;
    // Each thread writes one pixel location in the texture (textel)
    PBOpos[index].w = 0;  // alpha channel unused
    PBOpos[index].x = color.x;
    PBOpos[index].y = color.y;
    PBOpos[index].z = color.z;
  }
}
// Intersects ray r against every sphere/cube in geoms and returns the index
// of the closest hit, or -1 if the ray misses everything. On a hit, the hit
// point and surface normal are written through the output references.
// Intersection tests return -1 for a miss, otherwise the ray parameter t.
__host__ __device__ int findNearestGeometricIntersection(ray& r, glm::vec3& intersectionPoint,
                                                         glm::vec3& intersectionNormal,
                                                         staticGeom* geoms, int numberOfGeoms)
{
  int nearestIntersectionObject = -1;
  float nearestIntersectionDist = FLT_MAX;
  for(int i=0; i<numberOfGeoms; ++i)
  {
    glm::vec3 iPoint;
    glm::vec3 iNormal;
    float t = -1;
    // A geom has exactly one type, so the cube test is an else-if (the
    // original re-tested CUBE even after a sphere match; this also matches
    // the dispatch style used in raytraceRay).
    if(geoms[i].type == SPHERE)
    {
      t = sphereIntersectionTest(geoms[i], r, iPoint, iNormal);
    }
    else if(geoms[i].type == CUBE)
    {
      t = boxIntersectionTest(geoms[i], r, iPoint, iNormal);
    }
    if (t != -1 && t < nearestIntersectionDist)
    {
      nearestIntersectionObject = i;
      nearestIntersectionDist = t;
      intersectionPoint = iPoint;
      intersectionNormal = iNormal;
    }
  }
  return nearestIntersectionObject;
}
// Direct-lighting shade with a single hard-coded point light at (0,10,20).
// Returns diffuse (Kd) + specular (Ks) contribution for shadePoint, or black
// if the shadow feeler hits any geometry on its way to the light.
// NOTE(review): shadowFeeler.direction is NOT normalized before the dot
// product below, so LN scales with the distance to the light before being
// clamped -- looks unintentional; confirm against the kernel-side shading.
__host__ __device__ glm::vec3 shade(material& mtl, glm::vec3& shadePoint, glm::vec3& shadeNormal,
glm::vec3 eye,staticGeom* geoms,int numberOfGeoms)
{
glm::vec3 lightPos( 0,10,20);   // hard-coded single white light
glm::vec3 lightCol(1,1,1);      // NOTE(review): unused below
int numberOfLights = 1;
float Kd = 0.6;  // diffuse weight
float Ks = 0.2;  // specular weight
glm::vec3 color(0,0,0);
for (int i=0;i<numberOfLights;++i)
{
ray shadowFeeler;
shadowFeeler.direction = lightPos - shadePoint;
// Offset the origin along the feeler to avoid self-intersection.
shadowFeeler.origin = shadePoint+ (float)RAY_BIAS_AMOUNT*shadowFeeler.direction;
glm::vec3 intersectionPoint,intersectionNormal;
int intersectionObjIndex = findNearestGeometricIntersection(shadowFeeler,intersectionPoint,
intersectionNormal,
geoms,numberOfGeoms);
// Any hit at all counts as occlusion (no distance-to-light comparison here).
if (intersectionObjIndex != -1)
continue;
float LN = glm::dot(shadowFeeler.direction,shadeNormal);
LN = utilityCore::clamp(LN,0,1);
// Phong: reflect the light direction about the normal, compare to view dir.
glm::vec3 Rj = glm::normalize(glm::reflect(-shadowFeeler.direction,shadeNormal));
glm::vec3 V = glm::normalize(eye-shadePoint);
float RjV = glm::dot(Rj,V);
RjV = utilityCore::clamp(RjV,0,1);
color+= (Kd*mtl.color*LN + Ks*mtl.specularColor*(powf(RjV,mtl.specularExponent)));
}
return color;
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
// Core raytracer kernel: one thread per pixel. Casts a primary ray, finds the
// nearest sphere/box hit, accumulates direct lighting (Lambert diffuse +
// Phong-style specular) using per-light shadow rays, and follows mirror
// bounces up to rayDepth, writing the final color to colors[index].
__global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
                            staticGeom* geoms, int numberOfGeoms, material* mtls, int numberOfMaterials,
                            light* lights, int numberOfLights){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict < bounds check: the original `<=` allowed the row/column just past
  // the image edge through, producing an out-of-bounds write to colors[].
  if(x < resolution.x && y < resolution.y){
    ray r = raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov);
    glm::vec3 color(0,0,0);
    float reflContribution = 1.0f;  // energy fraction carried by the current bounce
    for(int depth=0; depth<rayDepth; ++depth)
    {
      // ---- nearest-intersection search along r ----
      glm::vec3 intersectionPoint;
      glm::vec3 intersectionNormal;
      int nearestIntersectionObject = -1;
      float nearestIntersectionDist = FLT_MAX;
      for(int g=0; g<numberOfGeoms; ++g)
      {
        glm::vec3 iPoint;
        glm::vec3 iNormal;
        float t = -1;
        if(geoms[g].type == SPHERE)
          t = sphereIntersectionTest(geoms[g], r, iPoint, iNormal);
        else if(geoms[g].type == CUBE)
          t = boxIntersectionTest(geoms[g], r, iPoint, iNormal);
        if (t != -1 && t < nearestIntersectionDist)
        {
          nearestIntersectionObject = g;
          nearestIntersectionDist = t;
          intersectionPoint = iPoint;
          intersectionNormal = iNormal;
        }
      }
      // Ray escaped the scene: stop bouncing. (Without this guard the
      // original code indexed geoms[-1] when nothing was hit -- an
      // out-of-bounds read.)
      if (nearestIntersectionObject == -1)
        break;
      material mtl = mtls[geoms[nearestIntersectionObject].materialid];
      // ---- direct lighting with shadow rays ----
      for (int i=0; i<numberOfLights; ++i)
      {
        ray shadowFeeler;
        glm::vec3 lightPos = lights[i].position;
        glm::vec3 lightCol = lights[i].color;  // NOTE(review): unused -- lights act as white
        float lightIntensity = lights[i].intensity;
        glm::vec3 ptToLight = lightPos - intersectionPoint;
        shadowFeeler.direction = glm::normalize(ptToLight);
        // Bias the origin off the surface to avoid self-shadowing.
        shadowFeeler.origin = intersectionPoint + (float)RAY_BIAS_AMOUNT*shadowFeeler.direction;
        bool occluded = false;
        float distSquared = ptToLight.x*ptToLight.x +
                            ptToLight.y*ptToLight.y +
                            ptToLight.z*ptToLight.z;
        // Occlusion test: any geometry strictly closer than the light blocks it.
        for(int g=0; g<numberOfGeoms; ++g)
        {
          glm::vec3 iPoint;
          glm::vec3 iNormal;
          float t = -1;
          if(geoms[g].type == SPHERE)
            t = sphereIntersectionTest(geoms[g], shadowFeeler, iPoint, iNormal);
          else if(geoms[g].type == CUBE)
            t = boxIntersectionTest(geoms[g], shadowFeeler, iPoint, iNormal);
          if (t != -1)
          {
            glm::vec3 occPoint = getPointOnRay(shadowFeeler, t);
            glm::vec3 ptToOcc = occPoint - shadowFeeler.origin;
            float dsq = ptToOcc.x*ptToOcc.x +
                        ptToOcc.y*ptToOcc.y +
                        ptToOcc.z*ptToOcc.z;
            if (dsq < distSquared)
            {
              occluded = true;
              break;
            }
          }
        }
        if(occluded)
          continue;
        // Lambert term, clamped to [0,1].
        float LN = glm::dot(shadowFeeler.direction, intersectionNormal);
        LN = max(LN, 0.0f);
        LN = min(LN, 1.0f);
        // Phong specular: reflect the light direction about the normal and
        // compare with the view direction.
        glm::vec3 reflectedDir = calculateReflectionDirection(intersectionNormal, -shadowFeeler.direction);
        glm::vec3 Rj = glm::normalize(reflectedDir);
        glm::vec3 V = glm::normalize(cam.position - intersectionPoint);
        float RjV = glm::dot(Rj, V);
        RjV = max(RjV, 0.0f);
        RjV = min(RjV, 1.0f);
        glm::vec3 diffColor = mtl.diffuseCoefficient*lightIntensity*LN*mtl.color;
        glm::vec3 specColor = mtl.specularCoefficient*mtl.specularColor*(powf(RjV, mtl.specularExponent));
        glm::vec3 localColor = diffColor + specColor;
        if(!mtl.hasReflective)
        {
          color += reflContribution*localColor;
          break;  // opaque diffuse surface: no further bounces
        }
        else
        {
          // NOTE(review): the bounce reuses the reflected *light* direction
          // rather than the reflected view/ray direction, and r.origin is
          // not biased off the surface -- both look suspicious; confirm
          // intent before changing.
          color += reflContribution*(1.0f - mtl.specularCoefficient)*localColor;
          reflContribution *= mtl.specularCoefficient;
          r.direction = reflectedDir;
          r.origin = intersectionPoint;
        }
      }
    }
    colors[index] = color;
  }
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Wrapper for the __global__ calls: packs the scene for the requested frame,
// copies it to the device, launches the raytrace and PBO-write kernels,
// copies the rendered image back, and releases all device buffers.
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms,light* lights, int numberOfLights){
  int traceDepth = 1; //determines how many bounces the raytracer traces
  // Launch configuration: 8x8 thread tiles covering the whole image.
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
  //send image to GPU
  glm::vec3* cudaimage = NULL;
  int numPixels = (int)renderCam->resolution.x*(int)renderCam->resolution.y;
  cudaMalloc((void**)&cudaimage, numPixels*sizeof(glm::vec3));
  cudaMemcpy( cudaimage, renderCam->image, numPixels*sizeof(glm::vec3), cudaMemcpyHostToDevice);
  //package geometry (per-frame transforms flattened to staticGeom) and send to GPU
  staticGeom* geomList = new staticGeom[numberOfGeoms];
  for(int i=0; i<numberOfGeoms; i++){
    staticGeom newStaticGeom;
    newStaticGeom.type = geoms[i].type;
    newStaticGeom.materialid = geoms[i].materialid;
    newStaticGeom.translation = geoms[i].translations[frame];
    newStaticGeom.rotation = geoms[i].rotations[frame];
    newStaticGeom.scale = geoms[i].scales[frame];
    newStaticGeom.transform = geoms[i].transforms[frame];
    newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
    geomList[i] = newStaticGeom;
  }
  staticGeom* cudageoms = NULL;
  cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
  cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
  //package materials
  material* cudamtls = NULL;
  cudaMalloc( (void**)&cudamtls, numberOfMaterials*sizeof(material));
  cudaMemcpy(cudamtls, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);
  //package lights
  light* cudalights = NULL;
  cudaMalloc( (void**)&cudalights, numberOfLights*sizeof(light));
  cudaMemcpy(cudalights, lights, numberOfLights*sizeof(light), cudaMemcpyHostToDevice);
  //package camera (plain-old-data snapshot passed by value to the kernel)
  cameraData cam;
  cam.resolution = renderCam->resolution;
  cam.position = renderCam->positions[frame];
  cam.view = renderCam->views[frame];
  cam.up = renderCam->ups[frame];
  cam.fov = renderCam->fov;
  //kernel launches
  raytraceRay<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudamtls,numberOfMaterials,cudalights,numberOfLights);
  sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage);
  //retrieve image from GPU (this cudaMemcpy blocks until the kernels finish)
  cudaMemcpy( renderCam->image, cudaimage, numPixels*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
  //free up stuff, or else we'll leak memory like a madman
  cudaFree( cudaimage );
  cudaFree( cudageoms );
  cudaFree(cudamtls);
  cudaFree(cudalights);
  // Fixed: geomList was allocated with new[], so it must be released with
  // delete[] (plain delete on a new[] array is undefined behavior).
  delete [] geomList;
  // make certain the kernel has completed
  // Fixed: cudaThreadSynchronize() is deprecated (and removed in newer
  // toolkits); cudaDeviceSynchronize() is the supported replacement.
  cudaDeviceSynchronize();
  checkCUDAError("Kernel failed!");
}
|
452a51c9d5a81d434135b8fd2f77a8dd5245d7a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
// Elementwise fused update: z[i] += t[i] * x[i] * (1 - x[i]) for i < lengthX.
// One thread per element; the grid must cover at least lengthX threads.
__global__ void tx1mx_32(const int lengthX, const float *t, const float *x, float *z)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthX)
    {
        // 1.0f (not 1.0): the double literal forced the whole expression to
        // double precision on the device before truncating back to float,
        // which is slow on most GPUs; the "_32" name indicates float math.
        z[i] += t[i]*x[i]*(1.0f-x[i]);
    }
}
} | 452a51c9d5a81d434135b8fd2f77a8dd5245d7a4.cu | extern "C"
{
// Elementwise fused update: z[i] += t[i] * x[i] * (1 - x[i]) for i < lengthX.
// One thread per element; the grid must cover at least lengthX threads.
__global__ void tx1mx_32(const int lengthX, const float *t, const float *x, float *z)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthX)
    {
        // 1.0f (not 1.0): the double literal forced the whole expression to
        // double precision on the device before truncating back to float,
        // which is slow on most GPUs; the "_32" name indicates float math.
        z[i] += t[i]*x[i]*(1.0f-x[i]);
    }
}
} |
4806078c66ca610caf39a51df6ad9ab71fee4c48.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
const int cfd_maxNeighbors = 8;
int line[100000][6];
int yy = 0;
// Builds a randomized, clustered neighbor list for the CFD benchmark.
// Atoms are grouped into "super blocks" of blockSz*cfd_nBlksPerCluster and
// each atom's cfd_maxNeighbors neighbors are drawn from its own super block,
// creating non-uniform data sharing. Returns the shuffled block-launch order
// in a malloc'd array that the CALLER must free().
// NOTE(review): relies on the global rand() stream and std::random_shuffle
// (removed in C++17) -- seed explicitly / migrate to std::shuffle for
// reproducible, portable runs.
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing
//but avoid that tasks sharing the same data are neighbor tasks by randomization
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);  // one entry per block (ceil division)
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
// Snapshot of the shuffled block order; ownership transfers to the caller.
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
// Expand the shuffled block order into a per-atom index permutation.
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;  // atoms per sharing cluster
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
// Rejection-sample neighbors from the same super block until all
// cfd_maxNeighbors slots are filled (self and out-of-range picks retry).
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
// Slot-major (SoA) layout: neighborList[slot*nAtom + atom].
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
// Derives velocity = momentum / density, component-wise, writing into `velocity`.
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
// Returns the squared magnitude of the velocity vector (|v|^2).
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
// Ideal-gas pressure from conserved variables: p = (gamma-1)*(E - 0.5*rho*|v|^2).
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
// Speed of sound for an ideal gas: c = sqrt(gamma * p / rho).
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
// Assembles the per-direction Euler flux contributions for one cell: the
// three momentum-flux rows (velocity outer-product terms plus pressure on the
// diagonal) and the density-energy flux, all written through the fc_* outputs.
// Fixed: the qualifier list previously read `__host__ __device__ __host__`;
// the duplicated __host__ was redundant.
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
    fc_momentum_x.x = velocity.x*momentum.x + pressure;
    fc_momentum_x.y = velocity.x*momentum.y;
    fc_momentum_x.z = velocity.x*momentum.z;
    // The off-diagonal terms repeat, so reuse the already-computed values.
    fc_momentum_y.x = fc_momentum_x.y;
    fc_momentum_y.y = velocity.y*momentum.y + pressure;
    fc_momentum_y.z = velocity.y*momentum.z;
    fc_momentum_z.x = fc_momentum_x.z;
    fc_momentum_z.y = fc_momentum_y.z;
    fc_momentum_z.z = velocity.z*momentum.z + pressure;
    float de_p = density_energy+pressure;
    fc_density_energy.x = velocity.x*de_p;
    fc_density_energy.y = velocity.y*de_p;
    fc_density_energy.z = velocity.z*de_p;
}
// Host-side verification: recomputes the flux for every cell with the same
// formulas as the GPU kernel and compares against `fluxes`. Prints "failed!"
// with the first mismatching cell and returns early, or "GOOD! passed!" when
// every cell agrees within tolerance (1% relative AND 0.01 absolute).
// Loop bound MSIZE*BLOCK_SIZE equals cfd_nAtom (defined above).
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);  // NOTE(review): unused -- factor is hard-coded to 1.3 below
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
// Gather this cell's conserved state (SoA arrays indexed by cell).
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
// Derived quantities for cell i.
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
// Accumulators for the recomputed flux of cell i.
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
// NOTE(review): the physical viscosity factor below is commented out and
// replaced by a constant -- this must match what the GPU kernel computes.
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
// Mismatch when BOTH the relative error (>1%) AND the absolute error (>0.01)
// are exceeded, for any of the five flux components.
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
void cfd_kernel_cpu(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
//FILE *f = fopen("hha.txt","w");
for(int tx =0;tx<256/2;tx++){
const float smoothing_coefficient = float(0.2f);
const int i = (0 + tx);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
//{
//line[yy][0]=0;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
//yy++;}
//fprintf(f,"0 0 0 0 %d %d\n",tx,i);}
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
/*{
// fprintf(f,"1 0 0 0 %d %d\n",tx,i);
// fprintf(f,"2 0 0 0 %d %d\n",tx,i);
//fprintf(f,"3 0 0 0 %d %d\n",tx,i);}
line[yy][0]=1;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
yy++;
line[yy][0]=2;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
yy++;
line[yy][0]=3;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
yy++;}
*/
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
// fprintf(f,"4 0 0 0 %d %d\n",tx,i);
//line[yy][0]=4;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
//yy++;
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
{
// fprintf(f,"5 0 0 %d %d %d\n",j,tx,i+j*nelr);}
//line[yy][0]=5;line[yy][1]=0;line[yy][2]=0;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
}
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
{
// fprintf(f,"6 0 0 %d %d %d\n",j,tx,i+j*nelr);
// fprintf(f,"6 0 1 %d %d %d\n",j,tx,i+(j+cfd_maxNeighbors)*nelr);
//fprintf(f,"6 0 2 %d %d %d\n",j,tx,i+(j+2*cfd_maxNeighbors)*nelr);
//line[yy][0]=6;line[yy][1]=0;line[yy][2]=0;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
//line[yy][0]=6;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
//line[yy][0]=6;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
}
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
/* fprintf(f,"0 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"1 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"2 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"3 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"4 0 1 %d %d %d\n",j,tx,nb);
*/
line[yy][0]=0;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=1;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=2;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=3;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=4;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
{
/*
fprintf(f,"7 1 0 0 %d %d\n",tx,i + VAR_DENSITY*nelr);
fprintf(f,"7 1 1 0 %d %d\n",tx,i + (VAR_MOMENTUM+0)*nelr);
fprintf(f,"7 1 2 0 %d %d\n",tx,i + (VAR_MOMENTUM+1)*nelr);
fprintf(f,"7 1 3 0 %d %d\n",tx,i + (VAR_MOMENTUM+2)*nelr);
fprintf(f,"7 1 4 0 %d %d\n",tx,i +VAR_DENSITY_ENERGY*nelr);*/
line[yy][0]=7;line[yy][1]=1;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i+ VAR_DENSITY*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=1;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i + (VAR_MOMENTUM+0)*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=2;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i + (VAR_MOMENTUM+1)*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=3;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i + (VAR_MOMENTUM+2)*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=4;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i +VAR_DENSITY_ENERGY*nelr;
yy++;
}
}
}
__global__ void cfd_kernel(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
    /*
     * One thread per cell, 1-D launch. For cell i the kernel accumulates
     * fluxes from up to cfd_maxNeighbors neighbors and writes the five
     * conserved-variable fluxes into `fluxes` (SoA layout, stride nelr).
     * `d_flag` is never referenced. All global arrays are read with stride
     * nelr so consecutive threads access consecutive addresses (coalesced).
     */
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);
    // Guard the grid tail: the launch rounds the grid up to whole blocks,
    // so threads beyond nelr must not touch memory.
    if (i >= nelr) return;
    const float smoothing_coefficient = float(0.2f); // unused: the physical viscosity formula below is disabled
    int j, nb;
    float3 normal; float normal_len;
    float factor;
    // Load the state of cell i from the SoA arrays.
    float density_i = density[i];
    float3 momentum_i;
    momentum_i.x = mx[i];
    momentum_i.y = my[i];
    momentum_i.z = mz[i];
    float density_energy_i = density_energy[i];
    float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
    float speed_sqd_i = compute_speed_sqd(velocity_i);
    float speed_i = sqrtf(speed_sqd_i);                                       // kept for parity with the disabled viscosity formula
    float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
    float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);   // likewise unused while viscosity is disabled
    float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
    float3 flux_contribution_i_density_energy;
    compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
    // Flux accumulators (float literals: avoid double-precision promotion).
    float flux_i_density = 0.0f;
    float3 flux_i_momentum;
    flux_i_momentum.x = 0.0f;
    flux_i_momentum.y = 0.0f;
    flux_i_momentum.z = 0.0f;
    float flux_i_density_energy = 0.0f;
    float3 velocity_nb;
    float density_nb, density_energy_nb;
    float3 momentum_nb;
    float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
    float3 flux_contribution_nb_density_energy;
    float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
    #pragma unroll
    for(j = 0; j < cfd_maxNeighbors; j++)
    {
        nb = elements_surrounding_elements[i + j*nelr];
        // Normals are stored component-major:
        // all X for every neighbor, then all Y, then all Z.
        normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
        normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
        normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
        normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); // unused while viscosity is disabled
        if(nb >= 0) // a legitimate neighbor (negative entries mark missing neighbors)
        {
            density_nb    = density[nb];
            momentum_nb.x = mx[nb];
            momentum_nb.y = my[nb];
            momentum_nb.z = mz[nb];
            density_energy_nb = density_energy[nb];
            compute_velocity(density_nb, momentum_nb, velocity_nb);
            speed_sqd_nb = compute_speed_sqd(velocity_nb);
            pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
            speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
            compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
            // Artificial viscosity: a fixed factor is used; the physical
            // formula is disabled:
            // factor = -normal_len*smoothing_coefficient*0.5f*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
            factor = 1.3f;
            flux_i_density        += factor*(density_i-density_nb);
            flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
            flux_i_momentum.x     += factor*(momentum_i.x-momentum_nb.x);
            flux_i_momentum.y     += factor*(momentum_i.y-momentum_nb.y);
            flux_i_momentum.z     += factor*(momentum_i.z-momentum_nb.z);
            // Accumulate cell-centered fluxes, one normal component at a time.
            factor = 0.5f*normal.x;
            flux_i_density        += factor*(momentum_nb.x+momentum_i.x);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
            flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
            flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
            flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
            factor = 0.5f*normal.y;
            flux_i_density        += factor*(momentum_nb.y+momentum_i.y);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
            flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
            flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
            flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
            factor = 0.5f*normal.z;
            flux_i_density        += factor*(momentum_nb.z+momentum_i.z);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
            flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
            flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
            flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
        }
    }
    fluxes[i + VAR_DENSITY*nelr]        = flux_i_density;
    fluxes[i + (VAR_MOMENTUM+0)*nelr]   = flux_i_momentum.x;
    fluxes[i + (VAR_MOMENTUM+1)*nelr]   = flux_i_momentum.y;
    fluxes[i + (VAR_MOMENTUM+2)*nelr]   = flux_i_momentum.z;
    fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
int main(int argc, char **argv) {
struct timespec t1,t2,t3,t4;
clock_gettime(CLOCK_MONOTONIC,&t1);
size_t limit = 1024*1024*1024;
hipDeviceSetLimit(hipLimitPrintfFifoSize,limit);
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
printf("%d %d %d %d %d %d %d %d\n",cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float),cfd_nAtom*NVAR*sizeof(float),cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
hipSetDeviceFlags(hipDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
clock_gettime(CLOCK_MONOTONIC,&t3);
cfd_kernel_cpu(cfd_nAtom, cfd_neighborList, normals, density, mx, my, mz, density_energy,
fluxes,flag_cfd);
clock_gettime(CLOCK_MONOTONIC,&t4);
printf("profiling time: %f\n", (t4.tv_sec-t3.tv_sec+(t4.tv_nsec-t3.tv_nsec)/1.e9));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
clock_gettime(CLOCK_MONOTONIC,&t2);
printf("total time: %f\n", (t2.tv_sec-t1.tv_sec+(t2.tv_nsec-t1.tv_nsec)/1.e9));
return 0;
}
| 4806078c66ca610caf39a51df6ad9ab71fee4c48.cu |
// C standard library
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// C++ standard library
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <fstream>
#include <iostream>
#include <iterator>
#include <list>
#include <map>
#include <random>
#include <set>
#include <vector>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime_api.h>
// project
#include "../include/common.h"
#define K 1
using namespace std;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
/* Number of thread blocks grouped into one data-sharing "super block"
 * when the randomized neighbor list is built. */
const int cfd_nBlksPerCluster = 16;
/* Total number of cells; BLOCK_SIZE and MSIZE come from ../include/common.h. */
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
/* Neighbors considered per cell. */
const int cfd_maxNeighbors = 8;
/* Trace buffer filled by cfd_kernel_cpu; each row holds six ints
 * (presumably: op id, flag, sub-op, neighbor index, thread index, element
 * index -- inferred from the writes in cfd_kernel_cpu, confirm before use).
 * `yy` counts the rows currently filled. */
int line[100000][6];
int yy = 0;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
        int* neighborList, int blockSz)
{
    /*
     * Fills neighborList (layout: neighborList[n*nAtom + atom], n in
     * [0, cfd_maxNeighbors)) with random neighbors for every atom, chosen
     * inside the atom's "super block" of cfd_nBlksPerCluster thread blocks.
     * This creates non-uniform data sharing while the shuffled block order
     * keeps tasks that share data from being scheduled next to each other.
     * Returns a malloc'd array holding the shuffled block order; the caller
     * owns it and must free() it.
     */
    vector<int> atomInds(nAtom);
    vector<int> blkInds((nAtom+blockSz-1)/blockSz);
    for(size_t i=0; i<blkInds.size(); ++i)
        blkInds[i] = (int)i;
    // std::random_shuffle was deprecated in C++14 and removed in C++17.
    // std::shuffle with an engine seeded from rand() keeps the permutation
    // deterministic for a given srand() seed (main seeds with srand(2013)).
    std::mt19937 shuffleRng(rand());
    std::shuffle(blkInds.begin(), blkInds.end(), shuffleRng);
    int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
    for(size_t i=0; i<blkInds.size(); ++i)
        blkOrder[i] = blkInds[i];
    // Expand the shuffled block order into a per-atom permutation.
    int j=0;
    for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
    {
        int blkInd = *it;
        for(int i=0; i<blockSz; ++i)
            atomInds[j++] = blkInd*blockSz + i;
    }
    int superBlockSz = blockSz * cfd_nBlksPerCluster;
    // Build the neighbor list: draw random partners from the same super block.
    // Repeated partners are allowed; self-pairings are rejected and redrawn.
    for (int i = 0; i < nAtom; i++)
    {
        int start = i - i%superBlockSz; // first atom of this super block
        int nNeighbors = 0;
        do {
            int j = start + rand() % superBlockSz;
            if (i == j || j>=nAtom) continue; // an atom cannot be its own neighbor
            neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
            nNeighbors ++;
        } while(nNeighbors<cfd_maxNeighbors);
    }
    return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
    /* velocity = momentum / density, component-wise. */
    velocity = make_float3(momentum.x / density,
                           momentum.y / density,
                           momentum.z / density);
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
    /* Returns |velocity|^2 (dot product of the velocity with itself). */
    const float vx = velocity.x;
    const float vy = velocity.y;
    const float vz = velocity.z;
    return vx * vx + vy * vy + vz * vz;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
    /* Ideal-gas pressure: p = (gamma - 1) * (E - 0.5 * rho * |v|^2). */
    const float kinetic_energy = float(0.5f) * density * speed_sqd;
    const float internal_energy = density_energy - kinetic_energy;
    return (float(GAMMA) - float(1.0f)) * internal_energy;
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
    /* Speed of sound in an ideal gas: c = sqrt(gamma * p / rho). */
    const float gp_over_rho = float(GAMMA) * pressure / density;
    return sqrtf(gp_over_rho);
}
/* Computes the per-cell flux contributions used by the kernels: one float3
 * per momentum component plus one for the energy equation. The momentum
 * outputs form a symmetric tensor, so the off-diagonal entries are reused
 * rather than recomputed.
 * Fix: the original declared the function `__host__ __device__ __host__`;
 * the duplicated __host__ qualifier is removed. */
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
    fc_momentum_x.x = velocity.x*momentum.x + pressure;
    fc_momentum_x.y = velocity.x*momentum.y;
    fc_momentum_x.z = velocity.x*momentum.z;
    fc_momentum_y.x = fc_momentum_x.y;                  // symmetry: xy == yx
    fc_momentum_y.y = velocity.y*momentum.y + pressure;
    fc_momentum_y.z = velocity.y*momentum.z;
    fc_momentum_z.x = fc_momentum_x.z;                  // symmetry: xz == zx
    fc_momentum_z.y = fc_momentum_y.z;                  // symmetry: yz == zy
    fc_momentum_z.z = velocity.z*momentum.z + pressure;
    // Energy flux: (E + p) * v.
    float de_p = density_energy+pressure;
    fc_density_energy.x = velocity.x*de_p;
    fc_density_energy.y = velocity.y*de_p;
    fc_density_energy.z = velocity.z*de_p;
}
/* Returns true when a GPU flux value `got` differs from the host-recomputed
 * value `want` by more than 1% relative AND more than 0.01 absolute.
 * Requiring both keeps small absolute noise on large values, and small
 * relative noise on near-zero values, from being flagged.
 * fabsf replaces the original `abs`: depending on headers, plain abs can
 * resolve to the integer overload and silently truncate the float. */
static bool cfd_flux_mismatch(float got, float want)
{
    return (fabsf((got - want) / want) > 0.01) && (fabsf(got - want) > 0.01);
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
    /*
     * Host-side verification: recompute every cell's flux with the same
     * arithmetic as the GPU kernel and compare against `fluxes`. Prints the
     * first mismatching cell and returns early, otherwise prints
     * "GOOD! passed!".
     * NOTE(review): the loop bound MSIZE*BLOCK_SIZE is assumed to equal nelr
     * (main passes cfd_nAtom == BLOCK_SIZE*MSIZE) -- confirm before reuse.
     */
    const float smoothing_coefficient = float(0.2f); // unused: the physical viscosity formula below is disabled
    for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
        int j, nb;
        float3 normal; float normal_len;
        float factor;
        // Load the state of cell i from the SoA arrays.
        float density_i = density[i];
        float3 momentum_i;
        momentum_i.x = mx[i];
        momentum_i.y = my[i];
        momentum_i.z = mz[i];
        float density_energy_i = density_energy[i];
        float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
        float speed_sqd_i = compute_speed_sqd(velocity_i);
        float speed_i = sqrtf(speed_sqd_i);                                     // kept for parity with the disabled viscosity formula
        float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
        float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); // likewise unused while viscosity is disabled
        float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
        float3 flux_contribution_i_density_energy;
        compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
        // Flux accumulators.
        float flux_i_density = 0.0;
        float3 flux_i_momentum;
        flux_i_momentum.x = float(0.0f);
        flux_i_momentum.y = float(0.0f);
        flux_i_momentum.z = float(0.0f);
        float flux_i_density_energy = float(0.0f);
        float3 velocity_nb;
        float density_nb, density_energy_nb;
        float3 momentum_nb;
        float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
        float3 flux_contribution_nb_density_energy;
        float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
        for(j = 0; j < cfd_maxNeighbors; j++)
        {
            nb = elements_surrounding_elements[i + j*nelr];
            // Normals are stored component-major:
            // all X for every neighbor, then all Y, then all Z.
            normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
            normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
            normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
            normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); // unused while viscosity is disabled
            if(nb >= 0) // a legitimate neighbor (negative entries mark missing neighbors)
            {
                density_nb    = density[nb];
                momentum_nb.x = mx[nb];
                momentum_nb.y = my[nb];
                momentum_nb.z = mz[nb];
                density_energy_nb = density_energy[nb];
                compute_velocity(density_nb, momentum_nb, velocity_nb);
                speed_sqd_nb = compute_speed_sqd(velocity_nb);
                pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
                speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
                compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
                // Artificial viscosity: a fixed factor is used; the physical
                // formula is disabled:
                // factor = -normal_len*smoothing_coefficient*0.5f*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
                factor = 1.3f;
                flux_i_density        += factor*(density_i-density_nb);
                flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
                flux_i_momentum.x     += factor*(momentum_i.x-momentum_nb.x);
                flux_i_momentum.y     += factor*(momentum_i.y-momentum_nb.y);
                flux_i_momentum.z     += factor*(momentum_i.z-momentum_nb.z);
                // Accumulate cell-centered fluxes, one normal component at a time.
                factor = float(0.5f)*normal.x;
                flux_i_density        += factor*(momentum_nb.x+momentum_i.x);
                flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
                flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
                flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
                flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
                factor = float(0.5f)*normal.y;
                flux_i_density        += factor*(momentum_nb.y+momentum_i.y);
                flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
                flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
                flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
                flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
                factor = float(0.5f)*normal.z;
                flux_i_density        += factor*(momentum_nb.z+momentum_i.z);
                flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
                flux_i_momentum.x     += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
                flux_i_momentum.y     += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
                flux_i_momentum.z     += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
            }
        }
        // Compare each of the five recomputed fluxes against the GPU output.
        if (cfd_flux_mismatch(fluxes[i + VAR_DENSITY*nelr],        flux_i_density)    ||
            cfd_flux_mismatch(fluxes[i + (VAR_MOMENTUM+0)*nelr],   flux_i_momentum.x) ||
            cfd_flux_mismatch(fluxes[i + (VAR_MOMENTUM+1)*nelr],   flux_i_momentum.y) ||
            cfd_flux_mismatch(fluxes[i + (VAR_MOMENTUM+2)*nelr],   flux_i_momentum.z) ||
            cfd_flux_mismatch(fluxes[i + VAR_DENSITY_ENERGY*nelr], flux_i_density_energy))
        {
            printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,
                   fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,
                   fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,
                   fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,
                   fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
            return;
        }
    }
    printf("GOOD! passed!\n");
    return;
}
void cfd_kernel_cpu(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
//FILE *f = fopen("hha.txt","w");
for(int tx =0;tx<256/2;tx++){
const float smoothing_coefficient = float(0.2f);
const int i = (0 + tx);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
//{
//line[yy][0]=0;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
//yy++;}
//fprintf(f,"0 0 0 0 %d %d\n",tx,i);}
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
/*{
// fprintf(f,"1 0 0 0 %d %d\n",tx,i);
// fprintf(f,"2 0 0 0 %d %d\n",tx,i);
//fprintf(f,"3 0 0 0 %d %d\n",tx,i);}
line[yy][0]=1;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
yy++;
line[yy][0]=2;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
yy++;
line[yy][0]=3;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
yy++;}
*/
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
// fprintf(f,"4 0 0 0 %d %d\n",tx,i);
//line[yy][0]=4;line[yy][1]=0;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i;
//yy++;
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
{
// fprintf(f,"5 0 0 %d %d %d\n",j,tx,i+j*nelr);}
//line[yy][0]=5;line[yy][1]=0;line[yy][2]=0;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
}
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
{
// fprintf(f,"6 0 0 %d %d %d\n",j,tx,i+j*nelr);
// fprintf(f,"6 0 1 %d %d %d\n",j,tx,i+(j+cfd_maxNeighbors)*nelr);
//fprintf(f,"6 0 2 %d %d %d\n",j,tx,i+(j+2*cfd_maxNeighbors)*nelr);
//line[yy][0]=6;line[yy][1]=0;line[yy][2]=0;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
//line[yy][0]=6;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
//line[yy][0]=6;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=i+j*nelr;
//yy++;
}
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
/* fprintf(f,"0 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"1 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"2 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"3 0 1 %d %d %d\n",j,tx,nb);
fprintf(f,"4 0 1 %d %d %d\n",j,tx,nb);
*/
line[yy][0]=0;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=1;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=2;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=3;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
line[yy][0]=4;line[yy][1]=0;line[yy][2]=1;line[yy][3]=j;line[yy][4]=tx;line[yy][5]=nb;
yy++;
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
{
/*
fprintf(f,"7 1 0 0 %d %d\n",tx,i + VAR_DENSITY*nelr);
fprintf(f,"7 1 1 0 %d %d\n",tx,i + (VAR_MOMENTUM+0)*nelr);
fprintf(f,"7 1 2 0 %d %d\n",tx,i + (VAR_MOMENTUM+1)*nelr);
fprintf(f,"7 1 3 0 %d %d\n",tx,i + (VAR_MOMENTUM+2)*nelr);
fprintf(f,"7 1 4 0 %d %d\n",tx,i +VAR_DENSITY_ENERGY*nelr);*/
line[yy][0]=7;line[yy][1]=1;line[yy][2]=0;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i+ VAR_DENSITY*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=1;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i + (VAR_MOMENTUM+0)*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=2;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i + (VAR_MOMENTUM+1)*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=3;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i + (VAR_MOMENTUM+2)*nelr;
yy++;
line[yy][0]=7;line[yy][1]=1;line[yy][2]=4;line[yy][3]=0;line[yy][4]=tx;line[yy][5]=i +VAR_DENSITY_ENERGY*nelr;
yy++;
}
}
}
__global__ void cfd_kernel(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
    // Per-element Euler flux accumulation (Rodinia-CFD style): combines the
    // element's own state with its cfd_maxNeighbors neighbors and writes the
    // five flux components (density, 3x momentum, energy) in SoA layout.
    // Launch: 1-D grid, one thread per element. The host computes the grid
    // with ceil-division, so the last block overshoots nelr.
    const float smoothing_coefficient = float(0.2f);  // kept for the commented-out viscosity term below
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);
    if (i >= nelr) return;  // FIX: tail threads previously read/wrote out of bounds
    int j, nb;
    float3 normal; float normal_len;
    float factor;
    float density_i = density[i];
    float3 momentum_i;
    momentum_i.x = mx[i];
    momentum_i.y = my[i];
    momentum_i.z = mz[i];
    float density_energy_i = density_energy[i];
    // Derived quantities for element i.
    float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
    float speed_sqd_i = compute_speed_sqd(velocity_i);
    float speed_i = sqrtf(speed_sqd_i);  // only used by the commented-out viscosity term
    float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
    float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
    float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
    float3 flux_contribution_i_density_energy;
    compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
    // Flux accumulators (float literals — a bare 0.0 is double).
    float flux_i_density = 0.0f;
    float3 flux_i_momentum;
    flux_i_momentum.x = 0.0f;
    flux_i_momentum.y = 0.0f;
    flux_i_momentum.z = 0.0f;
    float flux_i_density_energy = 0.0f;
    float3 velocity_nb;
    float density_nb, density_energy_nb;
    float3 momentum_nb;
    float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
    float3 flux_contribution_nb_density_energy;
    float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
    #pragma unroll
    for(j = 0; j < cfd_maxNeighbors; j++)
    {
        nb = elements_surrounding_elements[i + j*nelr];
        // Normals are already stored in coalesced SoA layout:
        // |X for neighbor 0, X for neighbor 1, ... | Y ... | Z ... |
        normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
        normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
        normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
        normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
        if(nb >= 0) // a legitimate neighbor (negative entries appear to mark boundary faces)
        {
            density_nb = density[nb];
            momentum_nb.x = mx[nb];
            momentum_nb.y = my[nb];
            momentum_nb.z = mz[nb];
            density_energy_nb = density_energy[nb];
            compute_velocity(density_nb, momentum_nb, velocity_nb);
            speed_sqd_nb = compute_speed_sqd(velocity_nb);
            pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
            speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
            compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
            // Artificial viscosity. The original data-dependent factor is kept
            // for reference; the code currently uses a fixed constant.
            //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
            factor = 1.3f;
            flux_i_density += factor*(density_i-density_nb);
            flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
            flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
            flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
            flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
            // Accumulate cell-centered fluxes projected onto the face normal.
            factor = 0.5f*normal.x;
            flux_i_density += factor*(momentum_nb.x+momentum_i.x);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
            flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
            flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
            flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
            factor = 0.5f*normal.y;
            flux_i_density += factor*(momentum_nb.y+momentum_i.y);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
            flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
            flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
            flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
            factor = 0.5f*normal.z;
            flux_i_density += factor*(momentum_nb.z+momentum_i.z);
            flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
            flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
            flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
            flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
        }
    }
    // Scatter the five flux components in SoA layout.
    fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
    fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
    fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
    fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
    fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
int main(int argc, char **argv) {
    // Driver: builds a random CFD problem, runs the CPU reference
    // (cfd_kernel_cpu) for profiling, then the GPU kernel, and finally
    // verifies the GPU fluxes against the reference with check_cfd().
    struct timespec t1,t2,t3,t4;
    clock_gettime(CLOCK_MONOTONIC,&t1);
    // Enlarge the device-side printf FIFO (the kernels were instrumented with printf).
    size_t limit = 1024*1024*1024;
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize,limit);
    srand(2013);  // fixed seed for reproducibility
    // Host-side problem state in pinned memory for faster transfers.
    float *density;
    float *mx;
    float *my;
    float *mz;
    float *density_energy;
    float *normals;
    float *fluxes;
    int* cfd_neighborList;
    cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
    cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
    cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
    // Device buffers mirroring the host state.
    float *d_density;
    float *d_mx;
    float *d_my;
    float *d_mz;
    float *d_density_energy;
    float *d_normals;
    float *d_fluxes;
    cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
    cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
    cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
    // Device neighbor list.
    int* d_cfd_neighborList;
    cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
    // FIX: sizeof() yields size_t; printing it with %d is undefined behavior
    // on LP64 platforms. Use the %zu length modifier instead.
    printf("%zu %zu %zu %zu %zu %zu %zu %zu\n",cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*sizeof(float),cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float),cfd_nAtom*NVAR*sizeof(float),cfd_nAtom*cfd_maxNeighbors*sizeof(int));
    // Initialize state with uniform random values in [0,1).
    for (int i = 0; i < cfd_nAtom; i++)
    {
        density[i] = (float)(drand48());
        density_energy[i] = (float)(drand48() );
        mx[i] = (float)(drand48() );
        my[i] = (float)(drand48() );
        mz[i] = (float)(drand48() );
    }
    for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
        normals[i] = (float)(drand48());
    cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
    cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
    // Copy problem state to the GPU.
    cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);
    // NOTE(review): cudaSetDeviceFlags must run before the CUDA context is
    // created; the allocations above already created it, so this call likely
    // returns cudaErrorSetOnActiveProcess (silently ignored) — confirm and
    // hoist above the first CUDA call if mapped memory matters here.
    cudaSetDeviceFlags(cudaDeviceMapHost);
    int *flag_cfd,*d_flag_cfd;
    cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped);
    cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
    // CPU reference run (timed separately).
    clock_gettime(CLOCK_MONOTONIC,&t3);
    cfd_kernel_cpu(cfd_nAtom, cfd_neighborList, normals, density, mx, my, mz, density_energy,
        fluxes,flag_cfd);
    clock_gettime(CLOCK_MONOTONIC,&t4);
    printf("profiling time: %f\n", (t4.tv_sec-t3.tv_sec+(t4.tv_nsec-t3.tv_nsec)/1.e9));
    // Time the GPU kernel with CUDA events.
    cudaEvent_t kernel_start, kernel_stop;
    cudaEventCreate(&kernel_start);
    cudaEventCreate(&kernel_stop);
    float kernel_time = 0.0f;
    cudaEventRecord(kernel_start, 0);
    int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;  // ceil-division
    cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
        d_fluxes,d_flag_cfd);
    cudaDeviceSynchronize();
    cudaEventRecord(kernel_stop, 0);
    cudaEventSynchronize(kernel_stop);
    // Convert elapsed time to seconds before reporting.
    kernel_time = 0.0f;
    cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    kernel_time *= 1.e-3;
    cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
    cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
    check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
    clock_gettime(CLOCK_MONOTONIC,&t2);
    printf("total time: %f\n", (t2.tv_sec-t1.tv_sec+(t2.tv_nsec-t1.tv_nsec)/1.e9));
    return 0;
}
|
e81cd30f9217683ac90fb28b41d70214a21fc207.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 1024
#define THREADX 16
#define THREADY 16
#define K_BLKSIZE 32
#define MA 2
#define MB 2
#define BS 16
#define KBS 8
__global__ void MatMul_v1(float *A, float *B, float *C)
{
    // Naive GEMM: one thread computes one element of C = A * B (N x N).
    // Launch: 2-D grid, one thread per output element; there is no bounds
    // check, so the grid must tile N exactly.
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // column of C
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // row of C
    float acc = 0.0f;
    for (int k = 0; k < N; k++)
        acc += A[y*N + k] * B[k*N + x];
    C[y*N + x] = acc;
}
__global__ void MatMul_v2(float *A, float *B, float *C)
{
    // Tiled GEMM: each block computes one THREADY x THREADX tile of C,
    // staging K_BLKSIZE-wide slabs of A and B through shared memory.
    const int bx = blockIdx.x;
    const int by = blockIdx.y;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    __shared__ float As[THREADY][K_BLKSIZE];
    __shared__ float Bs[K_BLKSIZE][THREADX];
    float acc = 0.0f;
    for (int s = 0; s < N / K_BLKSIZE; ++s) {
        // Cooperative loads: each thread copies several entries of each slab.
        for (int e = 0; e < K_BLKSIZE / THREADX; ++e)
            As[ty][e*THREADX + tx] = A[by*blockDim.y*N + s*K_BLKSIZE + ty*N + e*THREADX + tx];
        for (int e = 0; e < K_BLKSIZE / THREADY; ++e)
            Bs[e*THREADY + ty][tx] = B[s*K_BLKSIZE*N + bx*blockDim.x + e*THREADY*N + ty*N + tx];
        __syncthreads();  // tiles fully populated before use
        for (int k = 0; k < K_BLKSIZE; ++k)
            acc += As[ty][k] * Bs[k][tx];
        __syncthreads();  // all reads done before the tiles are refilled
    }
    C[by*blockDim.y*N + bx*blockDim.x + ty*N + tx] = acc;
}
__global__ void MatMul_v3(float *A, float *B, float *C)
{
    // Register-blocked tiled GEMM: each thread accumulates an MA x MB set of
    // C elements whose rows/columns are BS apart, so one block covers an
    // (MA*BS) x (MB*BS) tile of C. A single shared slab caches the A and B
    // sub-tiles for the current K-step.
    int blkidx = blockIdx.x;
    int blkidy = blockIdx.y;
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    // First MA*BS*BS floats hold the A cache, the remainder holds B.
    __shared__ float cacheS[(MA+MB)*BS*BS];
    float *cacheA = cacheS;
    float *cacheB = cacheS + MA*BS*BS;
    float rst[MA][MB] = { { 0.0 } };  // per-thread output accumulators
    float regA[MA];  // registers staging the global->shared transfer
    float regB[MB];
    float tmpA[MA];  // operands re-read from shared memory each inner k-step
    float tmpB[MB];
    #pragma unroll
    for (int blk = 0; blk < N; blk += BS) {  // walk the K dimension in BS-wide slabs
    // Load this slab of A and B into registers (contiguous along tidx).
    #pragma unroll
    for (int i = 0; i < MA; i++) {
    regA[i] = A[(blkidy*MA*BS + i*BS + tidy)*N + blk + tidx];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
    regB[i] = B[(blk + tidy)*N + blkidx*MB*BS + i*BS + tidx];
    }
    // Barrier: the previous iteration's reads of the shared slab must
    // finish before the slab is overwritten below.
    __syncthreads();
    #pragma unroll
    for (int i = 0; i < MA; i++) {
    cacheA[i*BS*BS + tidy*BS + tidx] = regA[i];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
    cacheB[MB*BS*tidy + i*BS + tidx] = regB[i];
    }
    __syncthreads();  // slab fully written before anyone reads it
    #pragma unroll
    for (int i = 0; i < BS; i++) {
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
    tmpA[ia] = cacheA[ia*BS*BS + tidy*BS + i];
    }
    #pragma unroll
    for (int ib = 0; ib < MB; ib++) {
    tmpB[ib] = cacheB[MB*BS*i + ib*BS + tidx];
    }
    // Rank-1 update of the MA x MB accumulator block.
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
    #pragma unroll
    for (int ib = 0; ib < MB; ib++) {
    rst[ia][ib] += tmpA[ia]*tmpB[ib];
    }
    }
    }
    }
    // Write back the MA x MB results, strided BS apart in each dimension.
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
    #pragma unroll
    for (int ib = 0; ib < MB; ib++) {
    C[(blkidy*MA*BS+ia*BS+tidy)*N + blkidx*MB*BS + ib*BS + tidx] = rst[ia][ib];
    }
    }
}
__global__ void MatMul_v4(float *A, float *B, float *C)
{
    // Double-buffered variant of MatMul_v3: two (MA+MB)*BS*BS shared slabs
    // alternate so slab `blk` can be staged while slab `blk-1` is consumed.
    // Buffer (blk&1) is written, buffer !(blk&1) is read.
    int blkidx = blockIdx.x;
    int blkidy = blockIdx.y;
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    __shared__ float cacheS[(MA+MB)*BS*BS*2];
    float *cacheA = cacheS;
    float *cacheB = cacheS + MA*BS*BS;
    float rst[MA][MB] = { { 0.0f } };  // per-thread output accumulators
    float regA[MA];
    float regB[MB];
    float tmpA[MA];
    float tmpB[MB];
    // Prologue: stage slab 0 into buffer 0 (no barrier needed yet — the
    // first read is preceded by the barrier inside the loop below).
    #pragma unroll
    for (int i = 0; i < MA; i++) {
        regA[i] = A[(blkidy*MA*BS + i*BS + tidy)*N + tidx];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
        regB[i] = B[tidy*N + blkidx*MB*BS + i*BS + tidx];
    }
    #pragma unroll
    for (int i = 0; i < MA; i++) {
        cacheA[i*BS*BS + tidy*BS + tidx] = regA[i];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
        cacheB[MB*BS*tidy + i*BS + tidx] = regB[i];
    }
    // Steady state: stage slab blk into buffer (blk&1) while consuming
    // buffer !(blk&1), which holds slab blk-1.
    #pragma unroll
    for (int blk = 1; blk < N/BS; blk++) {
        #pragma unroll
        for (int i = 0; i < MA; i++) {
            regA[i] = A[(blkidy*MA*BS + i*BS + tidy)*N + blk*BS + tidx];
        }
        #pragma unroll
        for (int i = 0; i < MB; i++) {
            regB[i] = B[(blk*BS + tidy)*N + blkidx*MB*BS + i*BS + tidx];
        }
        #pragma unroll
        for (int i = 0; i < MA; i++) {
            cacheA[i*BS*BS + tidy*BS + tidx + (blk&1)*(MA+MB)*BS*BS] = regA[i];
        }
        #pragma unroll
        for (int i = 0; i < MB; i++) {
            cacheB[MB*BS*tidy + i*BS + tidx + (blk&1)*(MA+MB)*BS*BS] = regB[i];
        }
        __syncthreads();  // writes to buffer (blk&1) visible to all threads
        #pragma unroll
        for (int i = 0; i < BS; i++) {
            #pragma unroll
            for (int ia = 0; ia < MA; ia++) {
                tmpA[ia] = cacheA[ia*BS*BS + tidy*BS + i + (!(blk&1))*(MA+MB)*BS*BS];
            }
            #pragma unroll
            for (int ib = 0; ib < MB; ib++) {
                tmpB[ib] = cacheB[MB*BS*i + ib*BS + tidx + (!(blk&1))*(MA+MB)*BS*BS];
            }
            #pragma unroll
            for (int ia = 0; ia < MA; ia++) {
                for (int ib = 0; ib < MB; ib++) {
                    rst[ia][ib] += tmpA[ia]*tmpB[ib];
                }
            }
        }
        // FIX: barrier after consuming buffer !(blk&1). The next iteration
        // writes buffer ((blk+1)&1) == !(blk&1) — the very buffer being read
        // here — so without this sync a fast thread could overwrite it while
        // slower threads are still reading (shared-memory data race).
        __syncthreads();
    }
    // Epilogue: consume the last staged slab from buffer !((N/BS)&1).
    #pragma unroll
    for (int i = 0; i < BS; i++) {
        #pragma unroll
        for (int ia = 0; ia < MA; ia++) {
            tmpA[ia] = cacheA[ia*BS*BS + tidy*BS + i + (!((N/BS)&1))*(MA+MB)*BS*BS];
        }
        #pragma unroll
        for (int ib = 0; ib < MB; ib++) {
            tmpB[ib] = cacheB[MB*BS*i + ib*BS + tidx + (!((N/BS)&1))*(MA+MB)*BS*BS];
        }
        #pragma unroll
        for (int ia = 0; ia < MA; ia++) {
            for (int ib = 0; ib < MB; ib++) {
                rst[ia][ib] += tmpA[ia]*tmpB[ib];
            }
        }
    }
    // Write back the MA x MB results, strided BS apart in each dimension.
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
        #pragma unroll
        for (int ib = 0; ib < MB; ib++) {
            C[(blkidy*MA*BS+ia*BS+tidy)*N + blkidx*MB*BS + ib*BS + tidx] = rst[ia][ib];
        }
    }
}
int main(int argc, char* argv[])
{
    // Driver: fills two NxN matrices with ramp data, multiplies them on the
    // GPU with the register-blocked kernel, prints the first ten outputs.
    int m = N;
    int n = N;
    int k = N;
    float *a = (float*) malloc(m*k*sizeof(float));
    float *b = (float*) malloc(k*n*sizeof(float));
    float *c = (float*) malloc(m*n*sizeof(float));
    for (int i = 0; i < m*k; i++) {
        a[i] = i;
    }
    for (int i = 0; i < k*n; i++) {
        b[i] = i;
    }
    float *d_a;
    float *d_b;
    float *d_c;
    hipMalloc((void**)&d_a, m*k*sizeof(*a));
    hipMalloc((void**)&d_b, k*n*sizeof(*b));
    hipMalloc((void**)&d_c, m*n*sizeof(*c));
    hipMemcpy(d_a, a, m*k*sizeof(*a), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, k*n*sizeof(*b), hipMemcpyHostToDevice);
    dim3 threads(THREADX, THREADY);
    //dim3 blocks(N/threads.x, N/threads.y);
    // Grid layout for the register-blocked kernels: each block covers an
    // (MA*BS) x (MB*BS) tile of C.
    dim3 blocks(N/MA/BS, N/MB/BS);
    // FIX: this file only defines MatMul_v1..v4; MatMul_v5 was an undefined
    // symbol (compile error). Launch MatMul_v4, whose tiling matches `blocks`.
    hipLaunchKernelGGL(( MatMul_v4), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c);
    hipDeviceSynchronize();
    hipMemcpy(c, d_c, m*n*sizeof(*c), hipMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++) {
        fprintf(stderr, "%.2f ", c[i]);
    }
    fprintf(stderr, "\n");
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
| e81cd30f9217683ac90fb28b41d70214a21fc207.cu | #include <stdio.h>
#include <cuda.h>
#define N 1024
#define THREADX 16
#define THREADY 16
#define K_BLKSIZE 32
#define MA 2
#define MB 2
#define BS 16
#define KBS 8
__global__ void MatMul_v1(float *A, float *B, float *C)
{
    // Naive matrix multiply: each thread owns exactly one C element and
    // walks the full K dimension. Grid must cover N x N exactly (no guard).
    const int j = blockDim.x * blockIdx.x + threadIdx.x;  // output column
    const int i = blockDim.y * blockIdx.y + threadIdx.y;  // output row
    float sum = 0.0f;
    for (int k = 0; k < N; ++k) {
        sum += A[i*N + k] * B[k*N + j];
    }
    C[i*N + j] = sum;
}
__global__ void MatMul_v2(float *A, float *B, float *C)
{
    // Shared-memory tiled GEMM. A block owns one THREADY x THREADX output
    // tile and walks the K dimension in K_BLKSIZE-wide steps.
    const int bCol = blockIdx.x;
    const int bRow = blockIdx.y;
    const int c = threadIdx.x;
    const int r = threadIdx.y;
    __shared__ float aTile[THREADY][K_BLKSIZE];
    __shared__ float bTile[K_BLKSIZE][THREADX];
    float dot = 0.0f;
    for (int step = 0; step < N / K_BLKSIZE; step++) {
        // Stage the current A and B slabs; each thread copies several entries.
        for (int e = 0; e < K_BLKSIZE / THREADX; e++)
            aTile[r][e*THREADX + c] = A[bRow*blockDim.y*N + step*K_BLKSIZE + r*N + e*THREADX + c];
        for (int e = 0; e < K_BLKSIZE / THREADY; e++)
            bTile[e*THREADY + r][c] = B[step*K_BLKSIZE*N + bCol*blockDim.x + e*THREADY*N + r*N + c];
        __syncthreads();  // all loads visible before computing
        for (int kk = 0; kk < K_BLKSIZE; kk++)
            dot += aTile[r][kk] * bTile[kk][c];
        __syncthreads();  // reads finished before the tiles are refilled
    }
    C[bRow*blockDim.y*N + bCol*blockDim.x + r*N + c] = dot;
}
__global__ void MatMul_v3(float *A, float *B, float *C)
{
    // Register-blocked tiled GEMM: each thread accumulates an MA x MB set of
    // C elements whose rows/columns are BS apart, so one block covers an
    // (MA*BS) x (MB*BS) tile of C. A single shared slab caches the A and B
    // sub-tiles for the current K-step.
    int blkidx = blockIdx.x;
    int blkidy = blockIdx.y;
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    // First MA*BS*BS floats hold the A cache, the remainder holds B.
    __shared__ float cacheS[(MA+MB)*BS*BS];
    float *cacheA = cacheS;
    float *cacheB = cacheS + MA*BS*BS;
    float rst[MA][MB] = { { 0.0 } };  // per-thread output accumulators
    float regA[MA];  // registers staging the global->shared transfer
    float regB[MB];
    float tmpA[MA];  // operands re-read from shared memory each inner k-step
    float tmpB[MB];
    #pragma unroll
    for (int blk = 0; blk < N; blk += BS) {  // walk the K dimension in BS-wide slabs
    // Load this slab of A and B into registers (contiguous along tidx).
    #pragma unroll
    for (int i = 0; i < MA; i++) {
    regA[i] = A[(blkidy*MA*BS + i*BS + tidy)*N + blk + tidx];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
    regB[i] = B[(blk + tidy)*N + blkidx*MB*BS + i*BS + tidx];
    }
    // Barrier: the previous iteration's reads of the shared slab must
    // finish before the slab is overwritten below.
    __syncthreads();
    #pragma unroll
    for (int i = 0; i < MA; i++) {
    cacheA[i*BS*BS + tidy*BS + tidx] = regA[i];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
    cacheB[MB*BS*tidy + i*BS + tidx] = regB[i];
    }
    __syncthreads();  // slab fully written before anyone reads it
    #pragma unroll
    for (int i = 0; i < BS; i++) {
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
    tmpA[ia] = cacheA[ia*BS*BS + tidy*BS + i];
    }
    #pragma unroll
    for (int ib = 0; ib < MB; ib++) {
    tmpB[ib] = cacheB[MB*BS*i + ib*BS + tidx];
    }
    // Rank-1 update of the MA x MB accumulator block.
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
    #pragma unroll
    for (int ib = 0; ib < MB; ib++) {
    rst[ia][ib] += tmpA[ia]*tmpB[ib];
    }
    }
    }
    }
    // Write back the MA x MB results, strided BS apart in each dimension.
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
    #pragma unroll
    for (int ib = 0; ib < MB; ib++) {
    C[(blkidy*MA*BS+ia*BS+tidy)*N + blkidx*MB*BS + ib*BS + tidx] = rst[ia][ib];
    }
    }
}
__global__ void MatMul_v4(float *A, float *B, float *C)
{
    // Double-buffered variant of MatMul_v3: two (MA+MB)*BS*BS shared slabs
    // alternate so slab `blk` can be staged while slab `blk-1` is consumed.
    // Buffer (blk&1) is written, buffer !(blk&1) is read.
    int blkidx = blockIdx.x;
    int blkidy = blockIdx.y;
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    __shared__ float cacheS[(MA+MB)*BS*BS*2];
    float *cacheA = cacheS;
    float *cacheB = cacheS + MA*BS*BS;
    float rst[MA][MB] = { { 0.0f } };  // per-thread output accumulators
    float regA[MA];
    float regB[MB];
    float tmpA[MA];
    float tmpB[MB];
    // Prologue: stage slab 0 into buffer 0 (no barrier needed yet — the
    // first read is preceded by the barrier inside the loop below).
    #pragma unroll
    for (int i = 0; i < MA; i++) {
        regA[i] = A[(blkidy*MA*BS + i*BS + tidy)*N + tidx];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
        regB[i] = B[tidy*N + blkidx*MB*BS + i*BS + tidx];
    }
    #pragma unroll
    for (int i = 0; i < MA; i++) {
        cacheA[i*BS*BS + tidy*BS + tidx] = regA[i];
    }
    #pragma unroll
    for (int i = 0; i < MB; i++) {
        cacheB[MB*BS*tidy + i*BS + tidx] = regB[i];
    }
    // Steady state: stage slab blk into buffer (blk&1) while consuming
    // buffer !(blk&1), which holds slab blk-1.
    #pragma unroll
    for (int blk = 1; blk < N/BS; blk++) {
        #pragma unroll
        for (int i = 0; i < MA; i++) {
            regA[i] = A[(blkidy*MA*BS + i*BS + tidy)*N + blk*BS + tidx];
        }
        #pragma unroll
        for (int i = 0; i < MB; i++) {
            regB[i] = B[(blk*BS + tidy)*N + blkidx*MB*BS + i*BS + tidx];
        }
        #pragma unroll
        for (int i = 0; i < MA; i++) {
            cacheA[i*BS*BS + tidy*BS + tidx + (blk&1)*(MA+MB)*BS*BS] = regA[i];
        }
        #pragma unroll
        for (int i = 0; i < MB; i++) {
            cacheB[MB*BS*tidy + i*BS + tidx + (blk&1)*(MA+MB)*BS*BS] = regB[i];
        }
        __syncthreads();  // writes to buffer (blk&1) visible to all threads
        #pragma unroll
        for (int i = 0; i < BS; i++) {
            #pragma unroll
            for (int ia = 0; ia < MA; ia++) {
                tmpA[ia] = cacheA[ia*BS*BS + tidy*BS + i + (!(blk&1))*(MA+MB)*BS*BS];
            }
            #pragma unroll
            for (int ib = 0; ib < MB; ib++) {
                tmpB[ib] = cacheB[MB*BS*i + ib*BS + tidx + (!(blk&1))*(MA+MB)*BS*BS];
            }
            #pragma unroll
            for (int ia = 0; ia < MA; ia++) {
                for (int ib = 0; ib < MB; ib++) {
                    rst[ia][ib] += tmpA[ia]*tmpB[ib];
                }
            }
        }
        // FIX: barrier after consuming buffer !(blk&1). The next iteration
        // writes buffer ((blk+1)&1) == !(blk&1) — the very buffer being read
        // here — so without this sync a fast thread could overwrite it while
        // slower threads are still reading (shared-memory data race).
        __syncthreads();
    }
    // Epilogue: consume the last staged slab from buffer !((N/BS)&1).
    #pragma unroll
    for (int i = 0; i < BS; i++) {
        #pragma unroll
        for (int ia = 0; ia < MA; ia++) {
            tmpA[ia] = cacheA[ia*BS*BS + tidy*BS + i + (!((N/BS)&1))*(MA+MB)*BS*BS];
        }
        #pragma unroll
        for (int ib = 0; ib < MB; ib++) {
            tmpB[ib] = cacheB[MB*BS*i + ib*BS + tidx + (!((N/BS)&1))*(MA+MB)*BS*BS];
        }
        #pragma unroll
        for (int ia = 0; ia < MA; ia++) {
            for (int ib = 0; ib < MB; ib++) {
                rst[ia][ib] += tmpA[ia]*tmpB[ib];
            }
        }
    }
    // Write back the MA x MB results, strided BS apart in each dimension.
    #pragma unroll
    for (int ia = 0; ia < MA; ia++) {
        #pragma unroll
        for (int ib = 0; ib < MB; ib++) {
            C[(blkidy*MA*BS+ia*BS+tidy)*N + blkidx*MB*BS + ib*BS + tidx] = rst[ia][ib];
        }
    }
}
int main(int argc, char* argv[])
{
    // Driver: fills two NxN matrices with ramp data, multiplies them on the
    // GPU with the register-blocked kernel, prints the first ten outputs.
    int m = N;
    int n = N;
    int k = N;
    float *a = (float*) malloc(m*k*sizeof(float));
    float *b = (float*) malloc(k*n*sizeof(float));
    float *c = (float*) malloc(m*n*sizeof(float));
    for (int i = 0; i < m*k; i++) {
        a[i] = i;
    }
    for (int i = 0; i < k*n; i++) {
        b[i] = i;
    }
    float *d_a;
    float *d_b;
    float *d_c;
    cudaMalloc((void**)&d_a, m*k*sizeof(*a));
    cudaMalloc((void**)&d_b, k*n*sizeof(*b));
    cudaMalloc((void**)&d_c, m*n*sizeof(*c));
    cudaMemcpy(d_a, a, m*k*sizeof(*a), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, k*n*sizeof(*b), cudaMemcpyHostToDevice);
    dim3 threads(THREADX, THREADY);
    //dim3 blocks(N/threads.x, N/threads.y);
    // Grid layout for the register-blocked kernels: each block covers an
    // (MA*BS) x (MB*BS) tile of C.
    dim3 blocks(N/MA/BS, N/MB/BS);
    // FIX: this file only defines MatMul_v1..v4; MatMul_v5 was an undefined
    // symbol (compile error). Launch MatMul_v4, whose tiling matches `blocks`.
    MatMul_v4<<<blocks, threads>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    cudaMemcpy(c, d_c, m*n*sizeof(*c), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++) {
        fprintf(stderr, "%.2f ", c[i]);
    }
    fprintf(stderr, "\n");
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
a4939dfabbf8863f2fc0968b282dd3dfa02019e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdGL.h"
#include <vector>
#include <iostream>
#include "objects.h"
#include "shader.h"
#include "helper_math.h"
#include <SDL2/SDL.h>
#include <SDL2/SDL_opengl.h>
//#include <SDL2/SDL_image.h>
using namespace std;
//GLOBAL VARIABLES//
//running or not
bool quit = false;
int Pause = 0;
//Window Size
int w = 1920;
int h = 1080;
//eye position and orientation
double ex = 0;
double ey = 0;
double ez = 0;
double zoom = 24;
double dzoom = 0;
double th = 0;
double ph = 0;
double dph = 0;
double dth = 0;
//Textures
unsigned int starTexture = 0;
//Shaders
int shader = 0;
int pixlight = 0;
//int textures = 0;
//int test = 0;
//Simulation Timestep
const float dt = 0.03125;
// Array Sizes
//const int N = pow(2,13);
const int N = pow(2,13);
const int M = 72;
int ping = 0;
int pong = 1;
float zeros[M*M*M*4] = {0.0};
float ones[M*M*M] = {1.0};
//Particle Arrays
float* verts = NULL;
float* pvels = NULL;
float* times = NULL;
float* colors = NULL;
float* dverts = NULL;
float* dpvels = NULL;
float* dtimes = NULL;
float* dcolors= NULL;
//Grid Arrays
float* h_gvels = NULL;
//float* h_gtemp = NULL;
//float* h_gdens = NULL;
float4* d_gvels[2] = {NULL};
//float* d_gtemp[2] = {NULL};
//float* d_gdens[2] = {NULL};
//float* d_gpres[2] = {NULL};
//float* d_diverge = NULL;
//User-controlled Computation Modes
bool stepmode = false;
bool gpu = true;
////////////////////
//functions that are called ahead of when they're defined
//because C
void reshape(int width, int height);
void keyboard(const Uint8* state);
///////// CUDA Functions //////////
// Arrays
// Grid [MxMxM]
//density
//temperature
//velocity
// Particles [N]
//position
//velocity
//time
//color
//typedef hipTextureObject_t hipTextureObject_t;
//typedef surface<void,cudaSurfaceType3D> surface<void,cudaSurfaceType3D>;
// non-texture-memory texture lookup function
// Software trilinear texture fetch from a dense s_i x s_j x s_k float4 volume
// (row-major, k fastest). Coordinates are clamped to the volume edges
// (clamp-to-edge addressing); the fractional parts blend the 8 surrounding
// voxels via trilerp().
__device__ float4 tex3d(float4* tex, float i, float j, float k, int s_i, int s_j, int s_k) {
    // FIX: use float literals/intrinsics (0.0f, floorf/ceilf) so the clamp and
    // rounding stay in single precision instead of promoting to double.
    i = clamp(i, 0.0f, (float)(s_i - 1));
    j = clamp(j, 0.0f, (float)(s_j - 1));
    k = clamp(k, 0.0f, (float)(s_k - 1));
    int i1 = floorf(i);
    int i2 = ceilf(i);
    int j1 = floorf(j);
    int j2 = ceilf(j);
    int k1 = floorf(k);
    int k2 = ceilf(k);
    // The eight corner samples of the enclosing cell.
    float4 a = tex[i1*s_j*s_k + j1*s_k + k1];
    float4 b = tex[i1*s_j*s_k + j1*s_k + k2];
    float4 c = tex[i1*s_j*s_k + j2*s_k + k1];
    float4 d = tex[i1*s_j*s_k + j2*s_k + k2];
    float4 e = tex[i2*s_j*s_k + j1*s_k + k1];
    float4 f = tex[i2*s_j*s_k + j1*s_k + k2];
    float4 g = tex[i2*s_j*s_k + j2*s_k + k1];
    float4 h = tex[i2*s_j*s_k + j2*s_k + k2];
    return trilerp(a,b,c,d,e,f,g,h, i-i1,j-j1,k-k1);
}
// Scalar overload of tex3d(): software trilinear fetch from a dense
// s_i x s_j x s_k float volume (row-major, k fastest) with clamp-to-edge
// addressing, blending the 8 surrounding voxels via trilerp().
__device__ float tex3d(float* tex, float i, float j, float k, int s_i, int s_j, int s_k) {
    // FIX: use float literals/intrinsics (0.0f, floorf/ceilf) so the clamp and
    // rounding stay in single precision instead of promoting to double.
    i = clamp(i, 0.0f, (float)(s_i - 1));
    j = clamp(j, 0.0f, (float)(s_j - 1));
    k = clamp(k, 0.0f, (float)(s_k - 1));
    int i1 = floorf(i);
    int i2 = ceilf(i);
    int j1 = floorf(j);
    int j2 = ceilf(j);
    int k1 = floorf(k);
    int k2 = ceilf(k);
    // The eight corner samples of the enclosing cell.
    float a = tex[i1*s_j*s_k + j1*s_k + k1];
    float b = tex[i1*s_j*s_k + j1*s_k + k2];
    float c = tex[i1*s_j*s_k + j2*s_k + k1];
    float d = tex[i1*s_j*s_k + j2*s_k + k2];
    float e = tex[i2*s_j*s_k + j1*s_k + k1];
    float f = tex[i2*s_j*s_k + j1*s_k + k2];
    float g = tex[i2*s_j*s_k + j2*s_k + k1];
    float h = tex[i2*s_j*s_k + j2*s_k + k2];
    return trilerp(a,b,c,d,e,f,g,h, i-i1,j-j1,k-k1);
}
// Boundary conditions for the MxMxM grid: threads whose cell lies on a face
// copy the adjacent interior cell's value (velocity xyz and pressure w alike)
// into the face cell.
// NOTE(review): edge/corner cells satisfy more than one face test and are
// written more than once by the same thread; neighboring threads may still be
// updating the interior values read here (no synchronization), so boundary
// values can lag one update behind -- confirm this is acceptable.
__device__ void set_bnd(float4* vels) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	int j = blockIdx.y*blockDim.y + threadIdx.y;
	int k = blockIdx.z*blockDim.z + threadIdx.z;
	// -x face copies from i+1; +x face copies from i-1
	if (i == 0) {
		float4 src = vels[(i+1)*M*M + j*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	else if (i == M-1) {
		float4 src = vels[(i-1)*M*M + j*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	// -y / +y faces
	if (j == 0) {
		float4 src = vels[i*M*M + (j+1)*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	else if (j == M-1) {
		float4 src = vels[i*M*M + (j-1)*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	// -z / +z faces
	if (k == 0) {
		float4 src = vels[i*M*M + j*M + (k+1)];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	else if (k == M-1) {
		float4 src = vels[i*M*M + j*M + (k-1)];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
}
// Iterative linear solver (Jacobi/Gauss-Seidel hybrid) for systems of the
// form x = (x0 + a * sum_of_6_neighbors(x)) / c, run on interior cells only.
// Boundary cells are refreshed via set_bnd after every sweep.
// NOTE(review): there is no barrier between the 16 sweeps, so neighbor reads
// may see values from an arbitrary mix of iterations across blocks; the
// commented-out cooperative-groups grid sync below is the intended fix if
// artifacts appear -- confirm before relying on exact convergence.
__device__ void lin_solv(float4* x, float4* x0, float a, float c) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	int j = blockIdx.y*blockDim.y + threadIdx.y;
	int k = blockIdx.z*blockDim.z + threadIdx.z;
	// TODO: Enable cooperative syncing iff block-edges become noticable or incompressibility is broken
	// cooperative_groups::grid_group g = cooperative_groups::this_grid();
	float cc = 1.0/c;
	if (i > 0 && i < M-1 &&
	    j > 0 && j < M-1 &&
	    k > 0 && k < M-1) {
		for (int iter=0; iter < 16; ++iter) {
			x[i*M*M + j*M + k] =
				(x0[i*M*M + j*M + k]
				+ a*(x[(i+1)*M*M + j*M + k] + x[(i-1)*M*M + j*M + k]
				+ x[i*M*M + (j+1)*M + k] + x[i*M*M + (j-1)*M + k]
				+ x[i*M*M + j*M + (k+1)] + x[i*M*M + j*M + (k-1)])
				)*cc;
			set_bnd(x);
			// g.sync();
		}
	}
}
// Diffusion step: relax (I - a*Laplacian) x = x0 via lin_solv, with the
// diffusion strength scaled by the timestep and interior grid size.
__global__ void diffuse(float4* x, float4* x0, float diff) {
	const float rate = dt * diff * (M-2)*(M-2);
	const float denom = 1+6*rate;
	lin_solv(x, x0, rate, denom);
}
// Pressure/buoyancy step: for each interior cell, read neighbor pressures
// (w component) and velocities from vels0, add a vertical buoyancy force
// proportional to the cell's pressure excess over its neighbor average, and
// adjust the cell pressure by the net inflow (divergence) of velocity.
// Results are written to vels; vels0 is read-only here.
__global__ void pressure(float4* vels, float4* vels0) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	int j = blockIdx.y*blockDim.y + threadIdx.y;
	int k = blockIdx.z*blockDim.z + threadIdx.z;
	if (i > 0 && i < M-1 && j > 0 && j < M-1 && k > 0 && k < M-1) {
		// collect neighboring densities
		float p_x0 = vels0[(i-1)*M*M + j*M + k].w;
		float p_x1 = vels0[(i+1)*M*M + j*M + k].w;
		float p_y0 = vels0[i*M*M + (j-1)*M + k].w;
		float p_y1 = vels0[i*M*M + (j+1)*M + k].w;
		float p_z0 = vels0[i*M*M + j*M + (k-1)].w;
		float p_z1 = vels0[i*M*M + j*M + (k+1)].w;
		// collect neighboring velocities
		float v_x0 = vels0[(i-1)*M*M + j*M + k].x;
		float v_x1 = vels0[(i+1)*M*M + j*M + k].x;
		float v_y0 = vels0[i*M*M + (j-1)*M + k].y;
		float v_y1 = vels0[i*M*M + (j+1)*M + k].y;
		float v_z0 = vels0[i*M*M + j*M + (k-1)].z;
		float v_z1 = vels0[i*M*M + j*M + (k+1)].z;
		// apply net pressure force (currently disabled: gradient terms zeroed)
		float d_x = 0.0;//p_x0 - p_x1;
		float d_y = 0.0;//p_y0 - p_y1;
		float d_z = 0.0;//p_z0 - p_z1;
		// and add vertical buoyancy force: pressure excess over the
		// 6-neighbor average pushes the cell along +z
		float p_b = vels0[i*M*M + j*M + k].w - 0.16666*(p_x0 + p_x1 + p_y0 + p_y1 + p_z0 + p_z1);
		float buoy = 1.0;
		//float a = dt;
		//float a = 5.0;
		// modify pressure based on net velocity into the cell
		float d_p = dt * (v_x0 - v_x1
				+ v_y0 - v_y1
				+ v_z0 - v_z1);
		vels[i*M*M + j*M + k] = vels0[i*M*M + j*M + k] + dt*make_float4(d_x, d_y, d_z+(buoy*p_b), d_p);
	}
	set_bnd(vels);
}
// Projection step (divergence removal), operating in place on vels.
// For each of the six neighbors of cell (i,j,k), a central-difference
// divergence is computed at that neighbor; the cell's velocity is then
// corrected by half the difference of opposing divergences per axis.
// NOTE(review): this kernel both reads neighbor velocities and writes its own
// cell without synchronization, so neighbors may observe partially updated
// values -- confirm acceptable (kernel is currently not launched; see step_gpu).
__global__ void project(float4* vels) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	int j = blockIdx.y*blockDim.y + threadIdx.y;
	int k = blockIdx.z*blockDim.z + threadIdx.z;
	// find divergences of adjacent cells (each guarded so the stencil
	// stays inside the grid; out-of-range neighbors contribute 0)
	float dx0 = 0.0;
	if (i > 1 && j > 0 && j < M-1 && k > 0 && k < M-1) {
		dx0 = 0.16666*( - vels[(i-2)*M*M + j*M + k].x
				+ vels[i*M*M + j*M + k].x
				- vels[(i-1)*M*M + (j-1)*M + k].y
				+ vels[(i-1)*M*M + (j+1)*M + k].y
				- vels[(i-1)*M*M + j*M + (k-1)].z
				+ vels[(i-1)*M*M + j*M + (k+1)].z);
	}
	float dx1 = 0.0;
	if (i < M-2 && j > 0 && j < M-1 && k > 0 && k < M-1) {
		dx1 = 0.16666*( - vels[i*M*M + j*M + k].x
				+ vels[(i+2)*M*M + j*M + k].x
				- vels[(i+1)*M*M + (j-1)*M + k].y
				+ vels[(i+1)*M*M + (j+1)*M + k].y
				- vels[(i+1)*M*M + j*M + (k-1)].z
				+ vels[(i+1)*M*M + j*M + (k+1)].z);
	}
	float dy0 = 0.0;
	if (i > 0 && i < M-1 && j > 1 && k > 0 && k < M-1) {
		dy0 = 0.16666*( - vels[(i-1)*M*M + (j-1)*M + k].x
				+ vels[(i+1)*M*M + (j-1)*M + k].x
				- vels[i*M*M + (j-2)*M + k].y
				+ vels[i*M*M + j*M + k].y
				- vels[i*M*M + (j-1)*M + (k-1)].z
				+ vels[i*M*M + (j-1)*M + (k+1)].z);
	}
	float dy1 = 0.0;
	if (i > 0 && i < M-1 && j < M-2 && k > 0 && k < M-1) {
		dy1 = 0.16666*( - vels[(i-1)*M*M + (j+1)*M + k].x
				+ vels[(i+1)*M*M + (j+1)*M + k].x
				- vels[i*M*M + j*M + k].y
				+ vels[i*M*M + (j+2)*M + k].y
				- vels[i*M*M + (j+1)*M + (k-1)].z
				+ vels[i*M*M + (j+1)*M + (k+1)].z);
	}
	float dz0 = 0.0;
	if (i > 0 && i < M-1 && j > 0 && j < M-1 && k > 1) {
		dz0 = 0.16666*( - vels[(i-1)*M*M + j*M + (k-1)].x
				+ vels[(i+1)*M*M + j*M + (k-1)].x
				- vels[i*M*M + (j-1)*M + (k-1)].y
				+ vels[i*M*M + (j+1)*M + (k-1)].y
				- vels[i*M*M + j*M + (k-2)].z
				+ vels[i*M*M + j*M + k].z);
	}
	float dz1 = 0.0;
	if (i > 0 && i < M-1 && j > 0 && j < M-1 && k < M-2) {
		dz1 = 0.16666*( - vels[(i-1)*M*M + j*M + (k+1)].x
				+ vels[(i+1)*M*M + j*M + (k+1)].x
				- vels[i*M*M + (j-1)*M + (k+1)].y
				+ vels[i*M*M + (j+1)*M + (k+1)].y
				- vels[i*M*M + j*M + k].z
				+ vels[i*M*M + j*M + (k+2)].z);
	}
	// subtract pressure vectors from velocities
	vels[i*M*M + j*M + k].x -= 0.5*(dx1 - dx0);
	vels[i*M*M + j*M + k].y -= 0.5*(dy1 - dy0);
	vels[i*M*M + j*M + k].z -= 0.5*(dz1 - dz0);
	set_bnd(vels);
}
// Pressure normalization: subtract the mean pressure from every cell's w
// component so the total drifts back toward zero.  The "total" is currently
// hard-coded to 1.0 rather than summed from the grid (see commented loop in
// the original implementation's history).
__global__ void balance(float4* vels) {
	const int i = blockIdx.x*blockDim.x + threadIdx.x;
	const int j = blockIdx.y*blockDim.y + threadIdx.y;
	const int k = blockIdx.z*blockDim.z + threadIdx.z;
	// mean of an assumed total pressure of 1.0 over all M^3 cells
	float mean_pres = 1.0;
	mean_pres = mean_pres / (M*M*M);
	vels[i*M*M + j*M + k].w -= mean_pres;
	set_bnd(vels);
}
// Semi-Lagrangian advection: trace each cell backwards along its own
// velocity for one timestep and sample vels_in there (trilinear lookup),
// writing the result into vels_out.
__global__ void advect(float4* vels_out, float4* vels_in) {
	const int i = blockIdx.x*blockDim.x + threadIdx.x;
	const int j = blockIdx.y*blockDim.y + threadIdx.y;
	const int k = blockIdx.z*blockDim.z + threadIdx.z;
	const int cell = i*M*M + j*M + k;
	// velocity at this cell, loaded once
	const float4 v = vels_in[cell];
	// back-traced sample position
	const float fi = i - dt*v.x;
	const float fj = j - dt*v.y;
	const float fk = k - dt*v.z;
	vels_out[cell] = tex3d(vels_in, fi, fj, fk, M,M,M);
	set_bnd(vels_out);
}
// Device-side buffer copy x <- x0, one element per thread, bounds-guarded.
// (Kept as a kernel rather than memcpy for performance, per the original.)
__global__ void pingpong(float4* x, float4* x0) {
	const int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx < M*M*M) {
		x[idx] = x0[idx];
	}
}
// Particle update kernel: advect each particle by the grid velocity sampled
// at its position, age it, and recompute its flame color from remaining life.
// Launch: one thread per particle.  NOTE: there is no bounds guard (the
// particle count is not passed in), so the launch grid must cover the
// particle arrays exactly.
__global__ void pstep(float4* gvels, float* verts, float* times, float* colors) {
	// particle index (times), and base index into the 3-component arrays
	int I = blockIdx.x*blockDim.x + threadIdx.x;
	int i = I * 3;
	// trilinear lookup of grid velocity at the particle's position
	float4 V = tex3d(gvels, verts[i], verts[i+1], verts[i+2], M,M,M);
	verts[i  ] += V.x;
	verts[i+1] += V.y;
	verts[i+2] += V.z;
	times[I] -= 0.002f;
	// BUG FIX: clamp life to zero before coloring -- the host only respawns
	// dead particles later, and sqrt of a negative lifetime previously
	// produced NaN colors for a frame
	float life = fmaxf(times[I], 0.0f);
	colors[i  ] = sqrtf(life);         // red rises fast
	colors[i+1] = life/1.125f;         // green scaled (non-negative by clamp)
	colors[i+2] = life*life*0.5f;      // blue = life^2/2, all in float math
}
// One GPU simulation step: diffuse -> pressure -> balance -> advect on the
// velocity grid, then move the particles through the resulting field.
// gvel0 is the current ("ping") buffer and gvel1 the scratch ("pong")
// buffer; the caller (physics) swaps ping/pong after this returns.
// NOTE(review): parameter M shadows the file-scope M that the kernels use
// internally (callers pass the global, so values agree), and parameter t is
// unused -- confirm before removing either.
void step_gpu(float* verts, float* times, float* colors,
              float4* gvel0, float4* gvel1, //float* gpres0, float* gpres1,
              const int N, const int M, int t) {
	// 8x8x8 threads per block; M must be a multiple of 8 for full coverage
	int b = 8;
	dim3 gBlock(M/b,M/b,M/b);
	dim3 gThread(b,b,b);
	float visc = 10.0;
	// Diffuse Velocities
	hipLaunchKernelGGL(( diffuse), dim3(gBlock),dim3(gThread), 0, 0, gvel1, gvel0, visc);
	//void** diffuse_args[3];
	//diffuse_args[0] = (void**)&gvel1; diffuse_args[1] = (void**)&gvel0; diffuse_args[2] = (void**)&visc;
	//hipLaunchCooperativeKernel((void*)diffuse, gBlock, gThread, (void**)diffuse_args);
	// Project (currently disabled)
	//project<<<gBlock,gThread>>>(gvel1);
	// Pressure
	hipLaunchKernelGGL(( pressure), dim3(gBlock),dim3(gThread), 0, 0, gvel1, gvel0);
	// Balance pressure
	hipLaunchKernelGGL(( balance), dim3(gBlock),dim3(gThread), 0, 0, gvel1);//, gvel1);
	// Advect Velocities (result lands back in gvel0)
	hipLaunchKernelGGL(( advect), dim3(gBlock),dim3(gThread), 0, 0, gvel0, gvel1);
	// Ping the Pong (replaced by the caller's index swap)
	//int Mblocks = ceil(M*M*M/512.0);
	//pingpong<<<Mblocks,512>>>(gvel1, gvel0);
	// Move Particles (N must be a multiple of 512 for full coverage)
	hipLaunchKernelGGL(( pstep), dim3(N/512),dim3(512), 0, 0, gvel1, verts, times, colors);
}
// CPU fallback particle step: integrate positions with per-particle
// velocities plus a simple lifetime-dependent buoyancy term, age each
// particle, respawn dead ones at the grid center, and recolor from the
// (now non-negative) remaining lifetime.
void step_cpu(float* verts, float* vels, float* times, float* colors, int N) {
	#pragma omp parallel for
	for (int I=0; I < N; ++I) {
		int i = 3*I;
		verts[i  ] += vels[i  ];
		verts[i+1] += vels[i+1];
		// buoyancy: older (lower-lifetime) particles rise faster
		verts[i+2] += vels[i+2] + 0.003*(1.0-times[I]);
		times[I] -= 0.0001;
		// BUG FIX: respawn BEFORE coloring, so sqrt() below never sees a
		// negative lifetime (previously produced one frame of NaN color)
		if (times[I] <= 0.0) {
			times[I] = 1.0;
			verts[i  ] = M/2;
			verts[i+1] = M/2;
			verts[i+2] = M/2;
		}
		colors[i  ] = sqrt(times[I]);
		colors[i+1] = max(times[I]/1.125, 0.0);
		colors[i+2] = pow(times[I],2);
	}
}
//////// SDL Init Function ////////
// Initialize SDL (video + timer), create the application window and its
// OpenGL context, and request vsync.  Returns false if any required step
// fails (vsync failure is tolerated and only logged).
bool init(SDL_Window** window, SDL_GLContext* context)
{
	bool success = true;
	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER) != 0)
	{
		cerr << "SDL failed to initialize: " << SDL_GetError() << endl;
		success = false;
	}
	// window uses the file-scope w/h globals for its initial size
	*window = SDL_CreateWindow("Flame", 0,0, w,h, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
	if (*window == NULL)
	{
		cerr << "SDL failed to create a window: " << SDL_GetError() << endl;
		success = false;
	}
	*context = SDL_GL_CreateContext(*window);
	if (*context == NULL)
	{
		cerr << "SDL failed to create OpenGL context: " << SDL_GetError() << endl;
		success = false;
	}
	//Vsync (best-effort: deliberately does not fail init)
	if (SDL_GL_SetSwapInterval(1) < 0)
	{
		cerr << "SDL could not set Vsync: " << SDL_GetError() << endl;
		// success = false;
	}
	cout << SDL_GetError() << endl;
	return success;
}
///////////////////////////////////
// Render one frame: set up the camera and lighting, draw the particles as
// blended point sprites through the flame shader, then (debug) copy the
// velocity grid back from the device and draw it as a field of line
// segments.  Finishes by swapping the SDL/GL buffers.
void display(SDL_Window* window, int r)
{
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	glEnable(GL_DEPTH_TEST);
	//glEnable(GL_CULL_FACE);
	//reshape(w,h);
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	//view angle: orbit the grid center at distance `zoom`
	ex = Sin(-th)*Cos(ph)*zoom;
	ey = Cos(-th)*Cos(ph)*zoom;
	ez = Sin(ph)*zoom;
	gluLookAt(ex+M/2,ey+M/2,ez+M/2, M/2,M/2,M/2, 0,0,Cos(ph));
	// lighting
	glEnable(GL_LIGHTING);
	float white[4] = {1.0,1.0,1.0,1.0};
	float pos[4] = {M/2+2.0, M/2-2.0, M/2+4.0, 1.0};
	float ambient[4] = {0.12, 0.15, 0.16, 1.0};
	float diffuse[4] = {0.65, 0.65, 0.60, 1.0};
	float specular[4]= {0.7, 0.7, 0.9, 1.0};
	float shininess = 64;
	glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
	glEnable(GL_COLOR_MATERIAL);
	glEnable(GL_LIGHT0);
	glLightfv(GL_LIGHT0, GL_AMBIENT, ambient);
	glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse);
	glLightfv(GL_LIGHT0, GL_SPECULAR, specular);
	glLightfv(GL_LIGHT0, GL_POSITION, pos);
	glMaterialfv(GL_FRONT, GL_SHININESS, &shininess);
	glMaterialfv(GL_FRONT, GL_SPECULAR, white);
	// Object Rendering: particles via the point-sprite flame shader,
	// unlit and without depth testing so additive blending works
	//glUseProgram(pixlight);
	//glColor3f(1.0,1.0,1.0);
	//ball(M/2,M/2,M/2, 0.25);
	glUseProgram(shader);
	glDisable(GL_LIGHTING);
	glDisable(GL_DEPTH_TEST);
	glBindTexture(GL_TEXTURE_2D, starTexture);
	int id = glGetUniformLocation(shader, "star");
	if (id>=0) glUniform1i(id,0);
	// ^ current bound texture, star.bmp
	id = glGetUniformLocation(shader, "size");
	if (id>=0) glUniform1f(id,0.2);
	glEnable(GL_BLEND);
	glBlendFunc(GL_ONE,GL_ONE);  // additive blending for the flame glow
	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_COLOR_ARRAY);
	glVertexPointer(3,GL_FLOAT,0,verts);
	glColorPointer(3,GL_FLOAT,0,colors);
	//cout << "verts: " << verts[0] << " \t" << verts[1] << " \t" << verts[2] << endl;
	//cout << "color: " << colors[0]<< " \t" << colors[1]<< " \t" << colors[2] << endl;
	glDrawArrays(GL_POINTS,0,N);
	glDisable(GL_BLEND);
	glDisableClientState(GL_VERTEX_ARRAY);
	glDisableClientState(GL_COLOR_ARRAY);
	// show velocities for debug purposes (device -> host grid copy each frame)
	hipError_t err = hipMemcpy(h_gvels, d_gvels[ping], 4*M*M*M*sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) {cout << "hipMemcpy failed: " << hipGetErrorString(err) << endl; quit = true;}
	//cout << "Successfully copied Velocities from Device to Host\n";
	glUseProgram(0);
	glEnable(GL_DEPTH_TEST);
	glBegin(GL_LINES);
	for (int i=0; i < M; ++i) {
		for (int j=0; j < M; ++j) {
			for (int k=0; k < M; ++k) {
				// line from each cell to cell + 5x its velocity vector
				glColor3f(1.0,0.5,0.0);
				glVertex3f(i, j, k);
				float x = h_gvels[4*(i*M*M + j*M + k) ]*5.0;
				float y = h_gvels[4*(i*M*M + j*M + k)+1]*5.0;
				float z = h_gvels[4*(i*M*M + j*M + k)+2]*5.0;
				//float x = 0.0;
				//float y = 0.0;
				//float z = h_gvels[4*(i*M*M + j*M + k)+3]*10.0;
				glColor3f(0.5,0.0,0.0);
				glVertex3f(i+x, j+y, k+z);
			}
		}
	}
	glEnd();
	glDisable(GL_DEPTH_TEST);
	//// show other values for debug purposes
	//hipError_t err = hipMemcpy(h_gtemp, d_gtemp[ping], M*M*M*sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) {cout << "hipMemcpy failed: " << hipGetErrorString(err) << endl; quit = true;}
	//glUseProgram(0);
	//glBegin(GL_LINES);
	//for (int i=0; i < M; ++i) {
	//	for (int j=0; j < M; ++j) {
	//		for (int k=0; k < M; ++k) {
	//			glColor3f(1.0,1.0,1.0);
	//			glVertex3f(i, j, k);
	//			float z = h_gtemp[i*M*M + j*M + k]*10.0;
	//			glColor3f(0.1,0.1,0.1);
	//			glVertex3f(i, j, k+z);
	//		}
	//	}
	//}
	//glEnd();
	//swap the buffers
	glFlush();
	SDL_GL_SwapWindow(window);
}
// One fixed-rate simulation tick: poll held keys, advance the camera, then
// (unless paused or in step mode) run either the GPU or CPU particle step.
// The GPU path round-trips particle data to the host each tick so dead or
// escaped particles can be respawned, injects pressure at the grid center,
// then uploads everything and launches step_gpu before swapping ping/pong.
void physics(int r)
{
	const Uint8* state = SDL_GetKeyboardState(NULL);
	keyboard(state);
	//adjust the eye position
	th += dth;
	ph += dph;
	zoom = zoom<2.0?2.0:zoom+dzoom;  // clamp zoom to a minimum of 2.0
	// Step Flame Animation ////
	if (!stepmode && !Pause) {
		if (gpu) {
			// pull particle state back to the host
			if(hipSuccess != hipMemcpy(verts, dverts, 3*N*sizeof(float), hipMemcpyDeviceToHost)) cout << "memcpy fail from " << dverts << " to " << verts << "\n";
			if(hipSuccess != hipMemcpy(times, dtimes, N*sizeof(float), hipMemcpyDeviceToHost)) cout << "memcpy fail from " << dtimes << " to " << times << "\n";
			if(hipSuccess != hipMemcpy(colors,dcolors,3*N*sizeof(float), hipMemcpyDeviceToHost)) cout << "memcpy fail from " << dcolors << " to " << colors << "\n";
			//cout << "successfully copied Particles from Device to Host" << endl;
			// respawn particles that died or left the grid, near the center
			for (int I=0; I < N; ++I) {
				int i = I*3;
				if (times[I] < 0.0 ||
					verts[i] < 0.0 ||
					verts[i] > M ||
					verts[i+1] < 0.0 ||
					verts[i+1] > M ||
					verts[i+2] < 0.0 ||
					verts[i+2] > M ) {
					times[I] = 1.0f;
					verts[i ] = 8*((float)rand()/(float)RAND_MAX-0.5) + M/2;
					verts[i+1] = 8*((float)rand()/(float)RAND_MAX-0.5) + M/2;
					verts[i+2] = 8*((float)rand()/(float)RAND_MAX-0.5) + M/2;
				}
			}
			// inject pressure at the grid center each tick (fire source)
			if (true) {//r < 100000) {
				if(hipSuccess != hipMemcpy(h_gvels, d_gvels[ping], 4*M*M*M*sizeof(float), hipMemcpyDeviceToHost)) cout << "memcpy fail from " << dcolors << " to " << colors << "\n";
				h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2))+3] += 1.0;
				//h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2))+2] += 1.0;
				//h_gvels[4*((M/2-1)*M*M + (M/2)*M + (M/2))+0] = -1.0;
				//h_gvels[4*((M/2+1)*M*M + (M/2)*M + (M/2))+0] = 1.0;
				//h_gvels[4*((M/2)*M*M + (M/2-1)*M + (M/2))+1] = -1.0;
				//h_gvels[4*((M/2)*M*M + (M/2+1)*M + (M/2))+1] = 1.0;
				//h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2-1))+2] = -1.0;
				//h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2+1))+2] = 1.0;
				if(hipSuccess != hipMemcpy(d_gvels[ping], h_gvels, 4*M*M*M*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail from " << verts << " to " << dverts << "\n";
			}
			//if(hipSuccess != hipMemcpy(d_gpres[0], zeros, M*M*M*sizeof(float), hipMemcpyHostToDevice)) cout << "failure to memcpy: " << endl;
			//if(hipSuccess != hipMemcpy(d_gpres[1], zeros, M*M*M*sizeof(float), hipMemcpyHostToDevice)) cout << "failure to memcpy: " << endl;
			// push the (possibly respawned) particle state back to the device
			if(hipSuccess != hipMemcpy(dverts, verts, 3*N*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail from " << verts << " to " << dverts << "\n";
			if(hipSuccess != hipMemcpy(dtimes, times, N*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail from " << times << " to " << dtimes << "\n";
			if(hipSuccess != hipMemcpy(dcolors,colors,3*N*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail from " << colors << " to " << dcolors << "\n";
			//cout << "successfully copied Particles from Host to Device" << endl;
			step_gpu(dverts, dtimes, dcolors,
				d_gvels[ping], d_gvels[pong],// d_gtemp[ping], d_gtemp[pong], d_gdens[ping], d_gdens[pong], d_gpres[0], d_gpres[1], d_diverge,
				//s_gvels[pong], s_gtemp[pong], s_gdens[pong],
				N, M, r);
			// swap the double-buffered grid indices
			ping = pong;
			pong = 1-pong;
		}
		else {
			step_cpu(verts, pvels, times, colors, N);
		}
	}
	////////////////////////////
}
// Handle a window resize: remember the new size, update the GL viewport,
// and rebuild the projection matrix (60-degree perspective, near plane 1.0,
// far plane 2*M so the whole grid stays visible).
void reshape(int width, int height)
{
	w = width;
	h = height;
	//new aspect ratio (guard against a zero height)
	double aspect = (height > 0) ? (double)width/height : 1;
	//set viewport to the new window
	glViewport(0,0 , width,height);
	//rebuild the projection matrix
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	gluPerspective(60, aspect, 1.0, 2*M);
	//switch back to model matrix
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
}
// In-place byte reversal of an n-byte buffer (endianness flip for BMP
// header fields read on big-endian hardware).
static void Reverse(void* x,const int n)
{
	char* bytes = (char*)x;
	int lo = 0;
	int hi = n-1;
	while (lo < hi)
	{
		char tmp = bytes[lo];
		bytes[lo] = bytes[hi];
		bytes[hi] = tmp;
		++lo;
		--hi;
	}
}
// Load a 24-bit uncompressed BMP file and upload it as a GL_TEXTURE_2D with
// linear filtering.  Returns the GL texture name.  Errors are reported to
// stderr but do not abort (subsequent reads may then misbehave).
// NOTE(review): "comdenssed" in the error text below is a typo for
// "compressed" -- left as-is here since this update only adds comments.
int LoadTexture(const char* file) {
	unsigned int texture; // Texture name
	FILE* f; // File pointer
	unsigned short magic; // Image magic
	int dx,dy;
	unsigned int size; // Image dimensions
	unsigned short nbp,bpp; // Planes and bits per pixel
	unsigned char* image; // Image data
	unsigned int k; // Counter
	int max; // Maximum texture dimensions
	// Open file
	f = fopen(file,"rb");
	if (!f) fprintf(stderr,"Cannot open file %s\n",file);
	// Check image magic ("BM", either byte order)
	if (fread(&magic,2,1,f)!=1) fprintf(stderr,"Cannot read magic from %s\n",file);
	if (magic!=0x4D42 && magic!=0x424D) fprintf(stderr,"Image magic not BMP in %s\n",file);
	// Seek to and read header (width, height, planes, bpp, compression)
	if (fseek(f,16,SEEK_CUR) || fread(&dx ,4,1,f)!=1 || fread(&dy ,4,1,f)!=1 ||
		fread(&nbp,2,1,f)!=1 || fread(&bpp,2,1,f)!=1 || fread(&k,4,1,f)!=1)
		fprintf(stderr,"Cannot read header from %s\n",file);
	// Reverse bytes on big endian hardware (detected by backwards magic)
	if (magic==0x424D)
	{
		Reverse(&dx,4);
		Reverse(&dy,4);
		Reverse(&nbp,2);
		Reverse(&bpp,2);
		Reverse(&k,4);
	}
	dx = abs(dx);
	dy = abs(dy);
	// Check image parameters
	glGetIntegerv(GL_MAX_TEXTURE_SIZE,&max);
	if (dx<1 || dx>max) fprintf(stderr,"%s image width %d out of range 1-%d\n",file,dx,max);
	if (dy<1 || dy>max) fprintf(stderr,"%s image height %d out of range 1-%d\n",file,dy,max);
	if (nbp!=1) fprintf(stderr,"%s bit planes is not 1: %d\n",file,nbp);
	if (bpp!=24) fprintf(stderr,"%s bits per pixel is not 24: %d\n",file,bpp);
	if (k!=0) fprintf(stderr,"%s comdenssed files not supported\n",file);
#ifndef GL_VERSION_2_0
	// OpenGL 2.0 lifts the restriction that texture size must be a power of two
	for (k=1;k<dx;k*=2);
	if (k!=dx) fprintf(stderr,"%s image width not a power of two: %d\n",file,dx);
	for (k=1;k<dy;k*=2);
	if (k!=dy) fprintf(stderr,"%s image height not a power of two: %d\n",file,dy);
#endif
	// Allocate image memory (3 bytes per pixel, RGB)
	size = 3*dx*dy;
	image = (unsigned char*) malloc(size);
	if (!image) fprintf(stderr,"Cannot allocate %d bytes of memory for image %s\n",size,file);
	// Seek to and read image
	if (fseek(f,20,SEEK_CUR) || fread(image,size,1,f)!=1) fprintf(stderr,"Error reading data from image %s\n",file);
	fclose(f);
	// Reverse channel order (BGR -> RGB)
	for (k=0;k<size;k+=3)
	{
		unsigned char temp = image[k];
		image[k] = image[k+2];
		image[k+2] = temp;
	}
	// Sanity check
	//ErrCheck("LoadTexBMP");
	// Generate 2D texture
	glGenTextures(1,&texture);
	glBindTexture(GL_TEXTURE_2D,texture);
	// Copy image
	glTexImage2D(GL_TEXTURE_2D,0,3,dx,dy,0,GL_RGB,GL_UNSIGNED_BYTE,image);
	if (glGetError()) fprintf(stderr,"Error in glTexImage2D %s %dx%d\n",file,dx,dy);
	// Scale linearly when image size doesn't match
	glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
	// Free image memory
	free(image);
	// Return texture name
	return texture;
}
// Per frame keyboard input here, per keydenss input in main()
// Per-frame held-key input: arrow keys set the camera rotation rates,
// Z/X set the zoom rate.  Rates reset to zero when no relevant key is held.
// (One-shot key presses are handled in handleEvents instead.)
void keyboard(const Uint8* state)
{
	//if (state[SDL_SCANCODE_ESCAPE])
	//	quit = true;
	// azimuth rotation rate
	dth = 0;
	if (state[SDL_SCANCODE_LEFT])
		dth = -0.75;
	else if (state[SDL_SCANCODE_RIGHT])
		dth = 0.75;
	// elevation rotation rate
	dph = 0;
	if (state[SDL_SCANCODE_DOWN])
		dph = -0.75;
	else if (state[SDL_SCANCODE_UP])
		dph = 0.75;
	// zoom rate
	dzoom = 0;
	if (state[SDL_SCANCODE_Z])
		dzoom = -0.10;
	else if (state[SDL_SCANCODE_X])
		dzoom = 0.10;
}
// all user interaction goes here
// Drain the SDL event queue and apply one-shot key presses.
// Returns true when the application should quit (window close or Q key).
// Space toggles pause, M toggles step mode, G toggles GPU/CPU simulation;
// window size changes trigger a viewport/projection reshape.
bool handleEvents()
{
	SDL_Event event;
	while (SDL_PollEvent(&event))
	{
		switch(event.type)
		{
			case SDL_QUIT:
				return true;
			case SDL_KEYDOWN:
				switch (event.key.keysym.scancode)
				{
					case SDL_SCANCODE_Q:
						return true;
					case SDL_SCANCODE_SPACE:
						Pause = 1 - Pause;
						break;
					case SDL_SCANCODE_M:
						stepmode = !stepmode;
						break;
					case SDL_SCANCODE_G:
						gpu = !gpu;
						break;
					default:
						break;
				}
				// BUG FIX: this break was missing, so keydown events fell
				// through into SDL_WINDOWEVENT and read event.window
				// members of a keyboard event (wrong union member).
				break;
			case SDL_WINDOWEVENT:
				if (event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED)
				{
					//cerr << event.window.data1 << " " << event.window.data2 << endl;
					reshape(event.window.data1, event.window.data2);
				}
				break;
		}
	}
	return false;
}
// Program entry point: set up SDL/GL, allocate and seed the host and device
// particle/grid arrays, load the star texture and shaders, then run the
// fixed-timestep (250 ms) physics + render loop until quit, and clean up.
int main(int argc, char *argv[])
{
	//SDL Window/OpenGL Context
	SDL_Window* window = NULL;
	SDL_GLContext context;
	//Initialize
	if (init(&window, &context) != true)
	{
		cerr << "Shutting Down\n";
		return 1;
	}
	// Host-side arrays.  colors is value-initialized to zero: it was
	// previously left uninitialized yet copied to the device below
	// (indeterminate reads).
	h_gvels = new float[4*M*M*M];
	verts = new float[3*N];
	pvels = new float[3*N];
	times = new float[N];
	colors= new float[3*N]();
	// Seed particles in an 8-unit cube around the grid center with small
	// random velocities (pvels is only used by the CPU path).
	for (int i=0; i < 3*N; i += 3) {
		verts[i  ] = 8*((float)rand()/(float)RAND_MAX - 0.5) + M/2;
		verts[i+1] = 8*((float)rand()/(float)RAND_MAX - 0.5) + M/2;
		verts[i+2] = 8*((float)rand()/(float)RAND_MAX - 0.5) + M/2;
		pvels[i  ] = ((float)rand()/(float)RAND_MAX - 0.5)/1000.0;
		pvels[i+1] = ((float)rand()/(float)RAND_MAX - 0.5)/1000.0;
		pvels[i+2] = ((float)rand()/(float)RAND_MAX - 0.5)/1000.0;
	}
	// random initial lifetimes so particles don't all respawn at once
	for (int i=0; i < N; ++i)
		times[i]= ((float)rand()/(float)RAND_MAX);
	//allocate particle and grid arrays on the device
	if(hipSuccess != hipMalloc(&dverts, 3*N*sizeof(float))) cout << "failure to allocate\n";
	if(hipSuccess != hipMalloc(&dpvels, 3*N*sizeof(float))) cout << "failure to allocate\n";
	if(hipSuccess != hipMalloc(&dtimes, N*sizeof(float))) cout << "failure to allocate\n";
	if(hipSuccess != hipMalloc(&dcolors,3*N*sizeof(float))) cout << "failure to allocate\n";
	if(hipSuccess != hipMalloc(&d_gvels[0],4*M*M*M*sizeof(float))) cout << "failure to allocate\n";
	if(hipSuccess != hipMalloc(&d_gvels[1],4*M*M*M*sizeof(float))) cout << "failure to allocate\n";
	// zero both grid buffers on the device
	hipError_t err;
	err = hipMemcpy(d_gvels[0], zeros, 4*M*M*M*sizeof(float), hipMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << hipGetErrorString(err) << endl;
	err = hipMemcpy(d_gvels[1], zeros, 4*M*M*M*sizeof(float), hipMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << hipGetErrorString(err) << endl;
	// upload the initial particle state
	if(hipSuccess != hipMemcpy(dverts, verts, 3*N*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail\n";
	if(hipSuccess != hipMemcpy(dtimes, times, N*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail\n";
	if(hipSuccess != hipMemcpy(dcolors,colors,3*N*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail\n";
	if(hipSuccess != hipMemcpy(dpvels, pvels, 3*N*sizeof(float), hipMemcpyHostToDevice)) cout << "memcpy fail\n";
	// NOTE(review): err still holds the result of the grid copies above,
	// not the particle copies; preserved for backward compatibility.
	if (err) quit = true;
	//////////////////////////////////////////////////////
	starTexture = LoadTexture("star.bmp");
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
	glTexEnvi(GL_TEXTURE_2D, GL_TEXTURE_ENV_MODE, GL_REPLACE);
	//Timing
	int r = 0;
	int dr = 0;
	int oldr = 0;
	int frames = 0;
	//shaders: point-sprite flame shader plus per-pixel lighting program
	shader = CreateShaderProgGeom((char*)"flame.vert", (char*)"flame.geom", (char*)"flame.frag");
	pixlight = CreateShaderProg((char*)"pixlight.vert", (char*)"pixlight.frag");
	reshape(w,h);
	int startuptime = SDL_GetTicks();
	oldr = startuptime;
	////////Main Loop////////
	try {
		while (!quit)
		{
			quit = handleEvents();
			////Physics Timing: fixed 250 ms simulation ticks////
			r = SDL_GetTicks();
			dr += r - oldr;
			while (dr >= 250)
			{
				physics(r);
				dr -= 250;
			}
			oldr = r;
			display(window, r);
			frames += 1;
		}
	}
	catch (...) {cout << "catch block\n";}
	cout << "Shutting Down\n";
	cout << "average framerate: " << 1000*(float)frames/(r - startuptime) << endl;
	hipFree(dverts);
	hipFree(dpvels);
	hipFree(dtimes);
	hipFree(dcolors);
	hipFree(d_gvels[0]);
	hipFree(d_gvels[1]);
	// BUG FIX: these were allocated with new[] and must be released with
	// delete[] (plain delete on a new[] pointer is undefined behavior)
	delete[] verts;
	delete[] pvels;
	delete[] times;
	delete[] colors;
	delete[] h_gvels;
	SDL_Quit();
	return 0;
}
| a4939dfabbf8863f2fc0968b282dd3dfa02019e3.cu | #include "stdGL.h"
#include <vector>
#include <iostream>
#include "objects.h"
#include "shader.h"
#include "helper_math.h"
#include <SDL2/SDL.h>
#include <SDL2/SDL_opengl.h>
//#include <SDL2/SDL_image.h>
using namespace std;
//GLOBAL VARIABLES//
//running or not
bool quit = false; // main-loop exit flag
int Pause = 0;     // simulation pause toggle (space key)
//Window Size
int w = 1920;
int h = 1080;
//eye position and orientation (orbit camera)
double ex = 0;
double ey = 0;
double ez = 0;
double zoom = 24;   // camera distance from grid center
double dzoom = 0;   // per-frame zoom rate
double th = 0;      // azimuth angle
double ph = 0;      // elevation angle
double dph = 0;     // per-frame elevation rate
double dth = 0;     // per-frame azimuth rate
//Textures
unsigned int starTexture = 0;
//Shaders
int shader = 0;     // point-sprite flame shader program
int pixlight = 0;   // per-pixel lighting program
//int textures = 0;
//int test = 0;
//Simulation Timestep
const float dt = 0.03125;
// Array Sizes
//const int N = pow(2,13);
const int N = pow(2,13); // particle count (8192)
const int M = 72;        // grid cells per axis
int ping = 0;            // index of current grid buffer
int pong = 1;            // index of scratch grid buffer
float zeros[M*M*M*4] = {0.0}; // zero-filled staging buffer for device memsets
// NOTE(review): despite the name, only element 0 is 1.0; aggregate
// initialization zero-fills the rest -- confirm intended usage.
float ones[M*M*M] = {1.0};
//Particle Arrays (host side; d-prefixed pointers below are the device copies)
float* verts = NULL;   // particle positions, 3 floats per particle
float* pvels = NULL;   // particle velocities (CPU path), 3 floats per particle
float* times = NULL;   // remaining lifetime, 1 float per particle
float* colors = NULL;  // particle RGB color, 3 floats per particle
float* dverts = NULL;  // device copy of verts
float* dpvels = NULL;  // device copy of pvels
float* dtimes = NULL;  // device copy of times
float* dcolors= NULL;  // device copy of colors
//Grid Arrays
float* h_gvels = NULL; // host mirror of the MxMxM grid, 4 floats per cell
//float* h_gtemp = NULL;
//float* h_gdens = NULL;
// double-buffered device grid; per cell: xyz = velocity, w = pressure
float4* d_gvels[2] = {NULL};
//float* d_gtemp[2] = {NULL};
//float* d_gdens[2] = {NULL};
//float* d_gpres[2] = {NULL};
//float* d_diverge = NULL;
//User-controlled Computation Modes
bool stepmode = false; // single-step mode toggle (M key)
bool gpu = true;       // GPU vs CPU simulation path (G key)
////////////////////
//functions that are called ahead of when they're defined
//because C
void reshape(int width, int height);
void keyboard(const Uint8* state);
///////// CUDA Functions //////////
// Arrays
// Grid [MxMxM]
//density
//temperature
//velocity
// Particles [N]
//position
//velocity
//time
//color
//typedef cudaTextureObject_t cudaTextureObject_t;
//typedef surface<void,cudaSurfaceType3D> surface<void,cudaSurfaceType3D>;
// non-texture-memory texture lookup function
// Software trilinear fetch from a float4 volume held in plain global memory
// (stand-in for a hardware 3D texture lookup).
// tex:      s_i x s_j x s_k volume, i-major layout (index = i*s_j*s_k + j*s_k + k)
// (i,j,k):  fractional sample coordinates; clamped to the volume, so
//           out-of-range samples return the nearest edge value.
// Returns the trilinearly interpolated float4.
__device__ float4 tex3d(const float4* __restrict__ tex, float i, float j, float k, int s_i, int s_j, int s_k) {
	// float literals (0.0f) keep the clamp in single precision; the previous
	// double literals promoted every clamp to double on the device
	i = clamp(i, 0.0f, s_i-1.0f);
	j = clamp(j, 0.0f, s_j-1.0f);
	k = clamp(k, 0.0f, s_k-1.0f);
	// integer corners of the cell containing (i,j,k); floorf/ceilf avoid the
	// double-precision floor/ceil overloads
	int i1 = floorf(i);
	int i2 = ceilf (i);
	int j1 = floorf(j);
	int j2 = ceilf (j);
	int k1 = floorf(k);
	int k2 = ceilf (k);
	// fetch the eight corners of the cell and interpolate
	float4 a = tex[i1*s_j*s_k + j1*s_k + k1];
	float4 b = tex[i1*s_j*s_k + j1*s_k + k2];
	float4 c = tex[i1*s_j*s_k + j2*s_k + k1];
	float4 d = tex[i1*s_j*s_k + j2*s_k + k2];
	float4 e = tex[i2*s_j*s_k + j1*s_k + k1];
	float4 f = tex[i2*s_j*s_k + j1*s_k + k2];
	float4 g = tex[i2*s_j*s_k + j2*s_k + k1];
	float4 h = tex[i2*s_j*s_k + j2*s_k + k2];
	return trilerp(a,b,c,d,e,f,g,h, i-i1,j-j1,k-k1);
}
// Software trilinear fetch from a scalar float volume in global memory
// (same scheme as the float4 overload).
// tex:      s_i x s_j x s_k volume, i-major layout (index = i*s_j*s_k + j*s_k + k)
// (i,j,k):  fractional sample coordinates; clamped to the volume bounds.
// Returns the trilinearly interpolated float.
__device__ float tex3d(const float* __restrict__ tex, float i, float j, float k, int s_i, int s_j, int s_k) {
	// float literals keep the clamp in single precision (was double)
	i = clamp(i, 0.0f, s_i-1.0f);
	j = clamp(j, 0.0f, s_j-1.0f);
	k = clamp(k, 0.0f, s_k-1.0f);
	// integer corners of the enclosing cell, in single precision
	int i1 = floorf(i);
	int i2 = ceilf (i);
	int j1 = floorf(j);
	int j2 = ceilf (j);
	int k1 = floorf(k);
	int k2 = ceilf (k);
	// fetch the eight corners of the cell and interpolate
	float a = tex[i1*s_j*s_k + j1*s_k + k1];
	float b = tex[i1*s_j*s_k + j1*s_k + k2];
	float c = tex[i1*s_j*s_k + j2*s_k + k1];
	float d = tex[i1*s_j*s_k + j2*s_k + k2];
	float e = tex[i2*s_j*s_k + j1*s_k + k1];
	float f = tex[i2*s_j*s_k + j1*s_k + k2];
	float g = tex[i2*s_j*s_k + j2*s_k + k1];
	float h = tex[i2*s_j*s_k + j2*s_k + k2];
	return trilerp(a,b,c,d,e,f,g,h, i-i1,j-j1,k-k1);
}
// Boundary conditions for the MxMxM grid: threads whose cell lies on a face
// copy the adjacent interior cell's value (velocity xyz and pressure w alike)
// into the face cell.
// NOTE(review): edge/corner cells satisfy more than one face test and are
// written more than once by the same thread; neighboring threads may still be
// updating the interior values read here (no synchronization), so boundary
// values can lag one update behind -- confirm this is acceptable.
__device__ void set_bnd(float4* vels) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	int j = blockIdx.y*blockDim.y + threadIdx.y;
	int k = blockIdx.z*blockDim.z + threadIdx.z;
	// -x face copies from i+1; +x face copies from i-1
	if (i == 0) {
		float4 src = vels[(i+1)*M*M + j*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	else if (i == M-1) {
		float4 src = vels[(i-1)*M*M + j*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	// -y / +y faces
	if (j == 0) {
		float4 src = vels[i*M*M + (j+1)*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	else if (j == M-1) {
		float4 src = vels[i*M*M + (j-1)*M + k];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	// -z / +z faces
	if (k == 0) {
		float4 src = vels[i*M*M + j*M + (k+1)];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
	else if (k == M-1) {
		float4 src = vels[i*M*M + j*M + (k-1)];
		vels[i*M*M + j*M + k] = make_float4(src.x, src.y, src.z, src.w);
		//vels[i*M*M + j*M + k] = make_float4(0.0, 0.0, 0.0, src.w);
	}
}
// Iterative linear solver (Jacobi/Gauss-Seidel hybrid) for systems of the
// form x = (x0 + a * sum_of_6_neighbors(x)) / c, run on interior cells only.
// Boundary cells are refreshed via set_bnd after every sweep.
// NOTE(review): there is no barrier between the 16 sweeps, so neighbor reads
// may see values from an arbitrary mix of iterations across blocks; the
// commented-out cooperative-groups grid sync below is the intended fix if
// artifacts appear -- confirm before relying on exact convergence.
__device__ void lin_solv(float4* x, float4* x0, float a, float c) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	int j = blockIdx.y*blockDim.y + threadIdx.y;
	int k = blockIdx.z*blockDim.z + threadIdx.z;
	// TODO: Enable cooperative syncing iff block-edges become noticable or incompressibility is broken
	// cooperative_groups::grid_group g = cooperative_groups::this_grid();
	float cc = 1.0/c;
	if (i > 0 && i < M-1 &&
	    j > 0 && j < M-1 &&
	    k > 0 && k < M-1) {
		for (int iter=0; iter < 16; ++iter) {
			x[i*M*M + j*M + k] =
				(x0[i*M*M + j*M + k]
				+ a*(x[(i+1)*M*M + j*M + k] + x[(i-1)*M*M + j*M + k]
				+ x[i*M*M + (j+1)*M + k] + x[i*M*M + (j-1)*M + k]
				+ x[i*M*M + j*M + (k+1)] + x[i*M*M + j*M + (k-1)])
				)*cc;
			set_bnd(x);
			// g.sync();
		}
	}
}
// Viscous diffusion of the field: diffusion rate a = dt*diff*(M-2)^2, solved
// implicitly via lin_solv with denominator 1+6a (dt and M are file globals).
__global__ void diffuse(float4* x, float4* x0, float diff) {
  float a = dt * diff * (M-2)*(M-2);
  lin_solv(x, x0, a, 1+6*a);
}
// Pressure/buoyancy update: reads previous state vels0, writes vels.
// Each float4 stores velocity in .x/.y/.z and a pressure-like scalar in .w.
// The explicit pressure-gradient force is currently disabled (d_x/d_y/d_z
// forced to 0); the only body force applied is a z-axis buoyancy term driven
// by the cell's pressure excess over its 6-neighbour average. The .w channel
// itself is adjusted by the net inflow of the neighbouring velocities.
__global__ void pressure(float4* vels, float4* vels0) {
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y*blockDim.y + threadIdx.y;
  int k = blockIdx.z*blockDim.z + threadIdx.z;
  if (i > 0 && i < M-1 && j > 0 && j < M-1 && k > 0 && k < M-1) {
    // collect neighboring pressures (w channel)
    float p_x0 = vels0[(i-1)*M*M + j*M + k].w;
    float p_x1 = vels0[(i+1)*M*M + j*M + k].w;
    float p_y0 = vels0[i*M*M + (j-1)*M + k].w;
    float p_y1 = vels0[i*M*M + (j+1)*M + k].w;
    float p_z0 = vels0[i*M*M + j*M + (k-1)].w;
    float p_z1 = vels0[i*M*M + j*M + (k+1)].w;
    // collect neighboring velocities (component pointing along each axis)
    float v_x0 = vels0[(i-1)*M*M + j*M + k].x;
    float v_x1 = vels0[(i+1)*M*M + j*M + k].x;
    float v_y0 = vels0[i*M*M + (j-1)*M + k].y;
    float v_y1 = vels0[i*M*M + (j+1)*M + k].y;
    float v_z0 = vels0[i*M*M + j*M + (k-1)].z;
    float v_z1 = vels0[i*M*M + j*M + (k+1)].z;
    // apply net pressure force -- currently disabled (kept as comments)
    float d_x = 0.0;//p_x0 - p_x1;
    float d_y = 0.0;//p_y0 - p_y1;
    float d_z = 0.0;//p_z0 - p_z1;
    // buoyancy: local pressure minus the 6-neighbour mean (0.16666 ~= 1/6)
    float p_b = vels0[i*M*M + j*M + k].w - 0.16666*(p_x0 + p_x1 + p_y0 + p_y1 + p_z0 + p_z1);
    float buoy = 1.0;  // buoyancy strength
    //float a = dt;
    //float a = 5.0;
    // modify pressure based on net velocity (inflow raises it, outflow lowers)
    float d_p = dt * (v_x0 - v_x1
                    + v_y0 - v_y1
                    + v_z0 - v_z1);
    vels[i*M*M + j*M + k] = vels0[i*M*M + j*M + k] + dt*make_float4(d_x, d_y, d_z+(buoy*p_b), d_p);
  }
  set_bnd(vels);
}
// Pressure projection: pushes the velocity field towards zero divergence.
// For each axis, the divergence at the two neighbouring cells along that axis
// is estimated with a central-difference stencil (0.16666 ~= 1/6) and the
// gradient of those divergences is subtracted from this cell's velocity
// component. Guards skip any stencil that would read outside the grid; a
// skipped term contributes 0. Currently unused in step_gpu (call commented).
// NOTE(review): each thread writes vels while neighbours may still be reading
// it — no synchronisation between the stencil phase and the writes; confirm
// this approximation is acceptable.
__global__ void project(float4* vels) {
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y*blockDim.y + threadIdx.y;
  int k = blockIdx.z*blockDim.z + threadIdx.z;
  // divergence at cell (i-1, j, k)
  float dx0 = 0.0;
  if (i > 1 && j > 0 && j < M-1 && k > 0 && k < M-1) {
    dx0 = 0.16666*( - vels[(i-2)*M*M + j*M + k].x
                    + vels[i*M*M + j*M + k].x
                    - vels[(i-1)*M*M + (j-1)*M + k].y
                    + vels[(i-1)*M*M + (j+1)*M + k].y
                    - vels[(i-1)*M*M + j*M + (k-1)].z
                    + vels[(i-1)*M*M + j*M + (k+1)].z);
  }
  // divergence at cell (i+1, j, k)
  float dx1 = 0.0;
  if (i < M-2 && j > 0 && j < M-1 && k > 0 && k < M-1) {
    dx1 = 0.16666*( - vels[i*M*M + j*M + k].x
                    + vels[(i+2)*M*M + j*M + k].x
                    - vels[(i+1)*M*M + (j-1)*M + k].y
                    + vels[(i+1)*M*M + (j+1)*M + k].y
                    - vels[(i+1)*M*M + j*M + (k-1)].z
                    + vels[(i+1)*M*M + j*M + (k+1)].z);
  }
  // divergence at cell (i, j-1, k)
  float dy0 = 0.0;
  if (i > 0 && i < M-1 && j > 1 && k > 0 && k < M-1) {
    dy0 = 0.16666*( - vels[(i-1)*M*M + (j-1)*M + k].x
                    + vels[(i+1)*M*M + (j-1)*M + k].x
                    - vels[i*M*M + (j-2)*M + k].y
                    + vels[i*M*M + j*M + k].y
                    - vels[i*M*M + (j-1)*M + (k-1)].z
                    + vels[i*M*M + (j-1)*M + (k+1)].z);
  }
  // divergence at cell (i, j+1, k)
  float dy1 = 0.0;
  if (i > 0 && i < M-1 && j < M-2 && k > 0 && k < M-1) {
    dy1 = 0.16666*( - vels[(i-1)*M*M + (j+1)*M + k].x
                    + vels[(i+1)*M*M + (j+1)*M + k].x
                    - vels[i*M*M + j*M + k].y
                    + vels[i*M*M + (j+2)*M + k].y
                    - vels[i*M*M + (j+1)*M + (k-1)].z
                    + vels[i*M*M + (j+1)*M + (k+1)].z);
  }
  // divergence at cell (i, j, k-1)
  float dz0 = 0.0;
  if (i > 0 && i < M-1 && j > 0 && j < M-1 && k > 1) {
    dz0 = 0.16666*( - vels[(i-1)*M*M + j*M + (k-1)].x
                    + vels[(i+1)*M*M + j*M + (k-1)].x
                    - vels[i*M*M + (j-1)*M + (k-1)].y
                    + vels[i*M*M + (j+1)*M + (k-1)].y
                    - vels[i*M*M + j*M + (k-2)].z
                    + vels[i*M*M + j*M + k].z);
  }
  // divergence at cell (i, j, k+1)
  float dz1 = 0.0;
  if (i > 0 && i < M-1 && j > 0 && j < M-1 && k < M-2) {
    dz1 = 0.16666*( - vels[(i-1)*M*M + j*M + (k+1)].x
                    + vels[(i+1)*M*M + j*M + (k+1)].x
                    - vels[i*M*M + (j-1)*M + (k+1)].y
                    + vels[i*M*M + (j+1)*M + (k+1)].y
                    - vels[i*M*M + j*M + k].z
                    + vels[i*M*M + j*M + (k+2)].z);
  }
  // subtract the divergence gradient from the velocity; with one thread per
  // cell the unguarded writes stay in bounds, and boundary cells are
  // overwritten by set_bnd anyway
  vels[i*M*M + j*M + k].x -= 0.5*(dx1 - dx0);
  vels[i*M*M + j*M + k].y -= 0.5*(dy1 - dy0);
  vels[i*M*M + j*M + k].z -= 0.5*(dz1 - dz0);
  set_bnd(vels);
}
// Re-centre the pressure field: subtract the mean pressure from every cell's
// .w channel. The real summation over the grid is commented out; sum_pres is
// currently hard-coded to 1.0 before being averaged over the M^3 cells, so
// each cell just loses the constant 1/(M*M*M).
__global__ void balance(float4* vels) {//, float4* vels0) {
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y*blockDim.y + threadIdx.y;
  int k = blockIdx.z*blockDim.z + threadIdx.z;
  //float sum_pres = 100.0;
  float sum_pres = 1.0;
  //for (int I=1; I < M*M*M; ++I) {
  //  sum_pres += vels0[I].w;
  //}
  sum_pres = sum_pres / (M*M*M);
  vels[i*M*M + j*M + k].w -= sum_pres;
  set_bnd(vels);
}
// Semi-Lagrangian advection: trace each cell's position backwards along its
// own velocity for one dt and sample the previous field there with trilinear
// interpolation (tex3d, which clamps to the grid). vels_out must be a buffer
// distinct from vels_in — see the ping/pong usage in step_gpu.
__global__ void advect(float4* vels_out, float4* vels_in) {
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y*blockDim.y + threadIdx.y;
  int k = blockIdx.z*blockDim.z + threadIdx.z;
  // backtraced (fractional) source position
  float fi = i - dt*vels_in[i*M*M + j*M + k].x;
  float fj = j - dt*vels_in[i*M*M + j*M + k].y;
  float fk = k - dt*vels_in[i*M*M + j*M + k].z;
  vels_out[i*M*M + j*M + k] = tex3d(vels_in, fi, fj, fk, M,M,M);
  //if (i == M/2 && j == M/2 && k == 8) {
  //  vels[i*M*M + j*M + k].z = 0.25;
  //}
  set_bnd(vels_out);
}
// Device-side copy of the whole M^3 grid from x0 into x (measured faster than
// cudaMemcpy for this buffer). Launch with at least M*M*M threads in 1-D.
__global__ void pingpong(float4* x, float4* x0) {
  const int cell = blockIdx.x*blockDim.x + threadIdx.x;
  const int total = M*M*M;
  if (cell >= total) return;  // guard the grid tail
  x[cell] = x0[cell];
}
// Advance one rendered particle per thread. I indexes the particle; i indexes
// its xyz triple in verts/colors. Velocity is sampled from the grid with
// trilinear interpolation at the particle's position, the lifetime decays by
// a fixed amount, and the colour ramps down as times[I] falls from 1 to 0.
// NOTE(review): times[I] can dip below 0 before the host-side respawn in
// physics(); sqrt() of a negative then yields a NaN red channel for a frame.
__global__ void pstep(float4* gvels, float* verts, float* times, float* colors) {
  // times index
  int I = blockIdx.x*blockDim.x + threadIdx.x;
  // verts & colors index
  int i = I * 3;
  // texture lookup of velocity at the particle's location
  float4 V = tex3d(gvels, verts[i], verts[i+1], verts[i+2], M,M,M);
  verts[i  ] += V.x;
  verts[i+1] += V.y;
  verts[i+2] += V.z;
  times[I] -= 0.002f;
  // colour ramp: bright red fades fastest, then green, then blue
  colors[i  ] = sqrt(times[I]);
  colors[i+1] = max(times[I]/1.125f, 0.0f);
  colors[i+2] = pow(times[I],2.0f)/2;
  //colors[i  ] = max(0.2, abs(V.x));
  //colors[i+1] = max(0.2, abs(V.y));
  //colors[i+2] = max(0.2, abs(V.z));
}
// One simulation step on the GPU: diffuse -> pressure/buoyancy -> balance ->
// advect over the M^3 grid (launched as (M/8)^3 blocks of 8^3 threads), then
// integrate the N particles in 512-thread 1-D blocks. gvel0/gvel1 are the
// ping-pong velocity buffers (caller swaps them afterwards). The projection
// pass, cooperative-launch variant, and explicit ping-pong copy are kept but
// disabled. Parameter t is currently unused.
// NOTE(review): no cudaGetLastError() after the launches — kernel failures
// surface silently at the next memcpy; consider adding checks.
void step_gpu(float* verts, float* times, float* colors,
              float4* gvel0, float4* gvel1, //float* gpres0, float* gpres1,
              const int N, const int M, int t) {
  int b = 8;
  dim3 gBlock(M/b,M/b,M/b);
  dim3 gThread(b,b,b);
  float visc = 10.0;
  // Diffuse Velocities
  diffuse<<<gBlock,gThread>>>(gvel1, gvel0, visc);
  //void** diffuse_args[3];
  //diffuse_args[0] = (void**)&gvel1; diffuse_args[1] = (void**)&gvel0; diffuse_args[2] = (void**)&visc;
  //cudaLaunchCooperativeKernel((void*)diffuse, gBlock, gThread, (void**)diffuse_args);
  // Project
  //project<<<gBlock,gThread>>>(gvel1);
  // Pressure
  pressure<<<gBlock,gThread>>>(gvel1, gvel0);
  // Balance pressure
  balance<<<gBlock,gThread>>>(gvel1);//, gvel1);
  // Advect Velocities
  advect<<<gBlock,gThread>>>(gvel0, gvel1);
  // Ping the Pong
  //int Mblocks = ceil(M*M*M/512.0);
  //pingpong<<<Mblocks,512>>>(gvel1, gvel0);
  // Move Particles
  pstep<<<N/512,512>>>(gvel1, verts, times, colors);
}
// CPU fallback for one simulation step: integrate N particles with their
// per-particle velocities plus an age-dependent upward buoyancy, decay each
// lifetime, recolour, and respawn expired or escaped particles at the grid
// centre (M is the file-global grid size).
// Fix vs. original: the colour maths clamped times[I] at 0 — the lifetime can
// dip below zero for one step before the respawn below, and sqrt()/pow() of a
// negative painted NaN colours for that frame.
void step_cpu(float* verts, float* vels, float* times, float* colors, int N) {
  #pragma omp parallel for
  for (int I=0; I < N; ++I) {
    int i = 3*I;
    // integrate position; buoyancy grows as the particle ages (times 1 -> 0)
    verts[i  ] += vels[i  ];
    verts[i+1] += vels[i+1];
    verts[i+2] += vels[i+2] + 0.003*(1.0-times[I]);
    times[I] -= 0.0001;
    // clamp the lifetime used for colouring so a just-expired particle does
    // not get NaN colours (sqrt/pow of a negative)
    double t = times[I] > 0.0 ? times[I] : 0.0;
    colors[i  ] = sqrt(t);
    colors[i+1] = max(t/1.125, 0.0);
    colors[i+2] = pow(t,2);
    // expired particles respawn at the centre of the M^3 grid
    if (times[I] <= 0.0) {
      times[I] = 1.0;
      verts[i  ] = M/2;
      verts[i+1] = M/2;
      verts[i+2] = M/2;
    }
  }
}
//////// SDL Init Function ////////
// Create the SDL window and OpenGL context (vsync requested but optional).
// Returns false if any required step failed. Note it deliberately attempts
// every step even after a failure so all error messages are reported at once;
// the caller aborts on a false return before using the window/context.
bool init(SDL_Window** window, SDL_GLContext* context)
{
  bool success = true;
  if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER) != 0)
  {
    cerr << "SDL failed to initialize: " << SDL_GetError() << endl;
    success = false;
  }
  *window = SDL_CreateWindow("Flame", 0,0, w,h, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
  if (*window == NULL)
  {
    cerr << "SDL failed to create a window: " << SDL_GetError() << endl;
    success = false;
  }
  *context = SDL_GL_CreateContext(*window);
  if (*context == NULL)
  {
    cerr << "SDL failed to create OpenGL context: " << SDL_GetError() << endl;
    success = false;
  }
  // Vsync: failure here is non-fatal (success intentionally left unchanged)
  if (SDL_GL_SetSwapInterval(1) < 0)
  {
    cerr << "SDL could not set Vsync: " << SDL_GetError() << endl;
    // success = false;
  }
  cout << SDL_GetError() << endl;
  return success;
}
///////////////////////////////////
// Render one frame: set up the camera and lighting, draw the particle system
// as blended point sprites through the geometry shader, then overlay a debug
// visualisation of the velocity grid as line segments. Parameter r (the tick
// time) is currently unused.
// NOTE(review): the debug path copies the whole 4*M^3-float velocity grid
// from device to host every frame — expensive; disable for real runs.
void display(SDL_Window* window, int r)
{
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
  glEnable(GL_DEPTH_TEST);
  //glEnable(GL_CULL_FACE);
  //reshape(w,h);
  glMatrixMode(GL_MODELVIEW);
  glLoadIdentity();
  // camera orbits the grid centre at distance 'zoom', angles th/ph
  ex = Sin(-th)*Cos(ph)*zoom;
  ey = Cos(-th)*Cos(ph)*zoom;
  ez = Sin(ph)*zoom;
  gluLookAt(ex+M/2,ey+M/2,ez+M/2, M/2,M/2,M/2, 0,0,Cos(ph));
  // lighting (only affects the lit-object path below, which is disabled)
  glEnable(GL_LIGHTING);
  float white[4] = {1.0,1.0,1.0,1.0};
  float pos[4] = {M/2+2.0, M/2-2.0, M/2+4.0, 1.0};
  float ambient[4] = {0.12, 0.15, 0.16, 1.0};
  float diffuse[4] = {0.65, 0.65, 0.60, 1.0};
  float specular[4]= {0.7, 0.7, 0.9, 1.0};
  float shininess = 64;
  glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
  glEnable(GL_COLOR_MATERIAL);
  glEnable(GL_LIGHT0);
  glLightfv(GL_LIGHT0, GL_AMBIENT, ambient);
  glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse);
  glLightfv(GL_LIGHT0, GL_SPECULAR, specular);
  glLightfv(GL_LIGHT0, GL_POSITION, pos);
  glMaterialfv(GL_FRONT, GL_SHININESS, &shininess);
  glMaterialfv(GL_FRONT, GL_SPECULAR, white);
  // Object Rendering (disabled test sphere)
  //glUseProgram(pixlight);
  //glColor3f(1.0,1.0,1.0);
  //ball(M/2,M/2,M/2, 0.25);
  // Particle rendering: unlit, additive-blended textured point sprites
  glUseProgram(shader);
  glDisable(GL_LIGHTING);
  glDisable(GL_DEPTH_TEST);
  glBindTexture(GL_TEXTURE_2D, starTexture);
  int id = glGetUniformLocation(shader, "star");
  if (id>=0) glUniform1i(id,0);
  // ^ current bound texture, star.bmp
  id = glGetUniformLocation(shader, "size");
  if (id>=0) glUniform1f(id,0.2);
  glEnable(GL_BLEND);
  glBlendFunc(GL_ONE,GL_ONE);  // additive blending: overlapping sprites glow
  glEnableClientState(GL_VERTEX_ARRAY);
  glEnableClientState(GL_COLOR_ARRAY);
  glVertexPointer(3,GL_FLOAT,0,verts);
  glColorPointer(3,GL_FLOAT,0,colors);
  //cout << "verts: " << verts[0] << " \t" << verts[1] << " \t" << verts[2] << endl;
  //cout << "color: " << colors[0]<< " \t" << colors[1]<< " \t" << colors[2] << endl;
  glDrawArrays(GL_POINTS,0,N);
  glDisable(GL_BLEND);
  glDisableClientState(GL_VERTEX_ARRAY);
  glDisableClientState(GL_COLOR_ARRAY);
  // show velocities for debug purposes
  cudaError_t err = cudaMemcpy(h_gvels, d_gvels[ping], 4*M*M*M*sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) {cout << "cudaMemcpy failed: " << cudaGetErrorString(err) << endl; quit = true;}
  //cout << "Successfully copied Velocities from Device to Host\n";
  glUseProgram(0);
  glEnable(GL_DEPTH_TEST);
  glBegin(GL_LINES);
  // one line per cell from the cell centre along 5x its velocity vector
  for (int i=0; i < M; ++i) {
    for (int j=0; j < M; ++j) {
      for (int k=0; k < M; ++k) {
        glColor3f(1.0,0.5,0.0);
        glVertex3f(i, j, k);
        float x = h_gvels[4*(i*M*M + j*M + k)  ]*5.0;
        float y = h_gvels[4*(i*M*M + j*M + k)+1]*5.0;
        float z = h_gvels[4*(i*M*M + j*M + k)+2]*5.0;
        //float x = 0.0;
        //float y = 0.0;
        //float z = h_gvels[4*(i*M*M + j*M + k)+3]*10.0;
        glColor3f(0.5,0.0,0.0);
        glVertex3f(i+x, j+y, k+z);
      }
    }
  }
  glEnd();
  glDisable(GL_DEPTH_TEST);
  //// show other values for debug purposes
  //cudaError_t err = cudaMemcpy(h_gtemp, d_gtemp[ping], M*M*M*sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) {cout << "cudaMemcpy failed: " << cudaGetErrorString(err) << endl; quit = true;}
  //glUseProgram(0);
  //glBegin(GL_LINES);
  //for (int i=0; i < M; ++i) {
  //  for (int j=0; j < M; ++j) {
  //    for (int k=0; k < M; ++k) {
  //      glColor3f(1.0,1.0,1.0);
  //      glVertex3f(i, j, k);
  //      float z = h_gtemp[i*M*M + j*M + k]*10.0;
  //      glColor3f(0.1,0.1,0.1);
  //      glVertex3f(i, j, k+z);
  //    }
  //  }
  //}
  //glEnd();
  //swap the buffers
  glFlush();
  SDL_GL_SwapWindow(window);
}
// One fixed-rate simulation tick (called from main's accumulator loop):
// polls held keys, advances the camera, then steps the flame. In GPU mode it
// round-trips the particle arrays to the host to respawn expired/escaped
// particles, injects pressure at the grid centre, and runs step_gpu on the
// ping/pong velocity buffers; in CPU mode it calls step_cpu.
// NOTE(review): the error messages for the gvels copies name the wrong
// buffers (dcolors/verts) — copy-paste slip, misleading when debugging.
// NOTE(review): the full-grid host round-trip each tick dominates the cost;
// the injection could be done by a tiny kernel instead.
void physics(int r)
{
  const Uint8* state = SDL_GetKeyboardState(NULL);
  keyboard(state);
  //adjust the eye position
  th += dth;
  ph += dph;
  zoom = zoom<2.0?2.0:zoom+dzoom;  // clamp zoom to a minimum of 2
  // Step Flame Animation ////
  if (!stepmode && !Pause) {
    if (gpu) {
      if(cudaSuccess != cudaMemcpy(verts, dverts, 3*N*sizeof(float), cudaMemcpyDeviceToHost)) cout << "memcpy fail from " << dverts << " to " << verts << "\n";
      if(cudaSuccess != cudaMemcpy(times, dtimes, N*sizeof(float), cudaMemcpyDeviceToHost)) cout << "memcpy fail from " << dtimes << " to " << times << "\n";
      if(cudaSuccess != cudaMemcpy(colors,dcolors,3*N*sizeof(float), cudaMemcpyDeviceToHost)) cout << "memcpy fail from " << dcolors << " to " << colors << "\n";
      //cout << "successfully copied Particles from Device to Host" << endl;
      // respawn particles that expired or left the M^3 grid, near the centre
      for (int I=0; I < N; ++I) {
        int i = I*3;
        if (times[I] < 0.0 ||
            verts[i] < 0.0 ||
            verts[i] > M ||
            verts[i+1] < 0.0 ||
            verts[i+1] > M ||
            verts[i+2] < 0.0 ||
            verts[i+2] > M ) {
          times[I] = 1.0f;
          verts[i  ] = 8*((float)rand()/(float)RAND_MAX-0.5) + M/2;
          verts[i+1] = 8*((float)rand()/(float)RAND_MAX-0.5) + M/2;
          verts[i+2] = 8*((float)rand()/(float)RAND_MAX-0.5) + M/2;
        }
      }
      // continuous source: bump the pressure (.w) at the grid centre
      if (true) {//r < 100000) {
        if(cudaSuccess != cudaMemcpy(h_gvels, d_gvels[ping], 4*M*M*M*sizeof(float), cudaMemcpyDeviceToHost)) cout << "memcpy fail from " << dcolors << " to " << colors << "\n";
        h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2))+3] += 1.0;
        //h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2))+2] += 1.0;
        //h_gvels[4*((M/2-1)*M*M + (M/2)*M + (M/2))+0] = -1.0;
        //h_gvels[4*((M/2+1)*M*M + (M/2)*M + (M/2))+0] = 1.0;
        //h_gvels[4*((M/2)*M*M + (M/2-1)*M + (M/2))+1] = -1.0;
        //h_gvels[4*((M/2)*M*M + (M/2+1)*M + (M/2))+1] = 1.0;
        //h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2-1))+2] = -1.0;
        //h_gvels[4*((M/2)*M*M + (M/2)*M + (M/2+1))+2] = 1.0;
        if(cudaSuccess != cudaMemcpy(d_gvels[ping], h_gvels, 4*M*M*M*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail from " << verts << " to " << dverts << "\n";
      }
      //if(cudaSuccess != cudaMemcpy(d_gpres[0], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice)) cout << "failure to memcpy: " << endl;
      //if(cudaSuccess != cudaMemcpy(d_gpres[1], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice)) cout << "failure to memcpy: " << endl;
      if(cudaSuccess != cudaMemcpy(dverts, verts, 3*N*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail from " << verts << " to " << dverts << "\n";
      if(cudaSuccess != cudaMemcpy(dtimes, times, N*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail from " << times << " to " << dtimes << "\n";
      if(cudaSuccess != cudaMemcpy(dcolors,colors,3*N*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail from " << colors << " to " << dcolors << "\n";
      //cout << "successfully copied Particles from Host to Device" << endl;
      step_gpu(dverts, dtimes, dcolors,
               d_gvels[ping], d_gvels[pong],// d_gtemp[ping], d_gtemp[pong], d_gdens[ping], d_gdens[pong], d_gpres[0], d_gpres[1], d_diverge,
               //s_gvels[pong], s_gtemp[pong], s_gdens[pong],
               N, M, r);
      // swap the ping/pong buffer roles for the next tick
      ping = pong;
      pong = 1-pong;
    }
    else {
      step_cpu(verts, pvels, times, colors, N);
    }
  }
  ////////////////////////////
}
// Window-resize handler: store the new size, reset the viewport, and rebuild
// a 60-degree perspective projection with the new aspect ratio. Far plane is
// 2*M so the whole grid stays visible.
void reshape(int width, int height)
{
  w = width;
  h = height;
  //new aspect ratio (guard against a zero-height window)
  double w2h = (height > 0) ? (double)width/height : 1;
  //set viewport to the new window
  glViewport(0,0 , width,height);
  //switch to projection matrix
  glMatrixMode(GL_PROJECTION);
  glLoadIdentity();
  //adjust projection
  //glOrtho(-w2h, w2h, -1, 1, -1, 1);
  gluPerspective(60, w2h, 1.0, 2*M);
  //switch back to model matrix
  glMatrixMode(GL_MODELVIEW);
  glLoadIdentity();
}
// Reverse the n bytes at x in place (endianness fix-up for BMP header fields).
static void Reverse(void* x,const int n)
{
  char* bytes = (char*)x;
  int lo = 0;
  int hi = n - 1;
  while (lo < hi)
  {
    char tmp = bytes[lo];
    bytes[lo] = bytes[hi];
    bytes[hi] = tmp;
    ++lo;
    --hi;
  }
}
// Load a 24-bit uncompressed BMP from 'file' into a new OpenGL 2D texture
// with linear filtering. Returns the GL texture name, or 0 on any failure.
// Fix vs. original: the original only printed warnings and kept going after
// errors — including fread() on a NULL FILE* after a failed fopen (crash) —
// and leaked the FILE*/pixel buffer on error paths. Every failure now closes
// resources and returns 0 (never a valid name from glGenTextures).
int LoadTexture(const char* file) {
  unsigned int texture;    // GL texture name
  FILE* f;                 // BMP file handle
  unsigned short magic;    // BMP magic number
  int dx,dy;               // image dimensions
  unsigned int size;       // pixel data size in bytes
  unsigned short nbp,bpp;  // colour planes and bits per pixel
  unsigned char* image;    // pixel data
  unsigned int k;          // scratch counter (also reused for compression field)
  int max;                 // GL_MAX_TEXTURE_SIZE
  // Open file
  f = fopen(file,"rb");
  if (!f) { fprintf(stderr,"Cannot open file %s\n",file); return 0; }
  // Check image magic
  if (fread(&magic,2,1,f)!=1) { fprintf(stderr,"Cannot read magic from %s\n",file); fclose(f); return 0; }
  if (magic!=0x4D42 && magic!=0x424D) { fprintf(stderr,"Image magic not BMP in %s\n",file); fclose(f); return 0; }
  // Seek to and read header fields (width, height, planes, bpp, compression)
  if (fseek(f,16,SEEK_CUR) || fread(&dx ,4,1,f)!=1 || fread(&dy ,4,1,f)!=1 ||
      fread(&nbp,2,1,f)!=1 || fread(&bpp,2,1,f)!=1 || fread(&k,4,1,f)!=1) {
    fprintf(stderr,"Cannot read header from %s\n",file);
    fclose(f);
    return 0;
  }
  // Reverse bytes on big endian hardware (detected by backwards magic)
  if (magic==0x424D)
  {
    Reverse(&dx,4);
    Reverse(&dy,4);
    Reverse(&nbp,2);
    Reverse(&bpp,2);
    Reverse(&k,4);
  }
  dx = abs(dx);
  dy = abs(dy);
  // Check image parameters against GL limits and loader assumptions
  glGetIntegerv(GL_MAX_TEXTURE_SIZE,&max);
  if (dx<1 || dx>max) { fprintf(stderr,"%s image width %d out of range 1-%d\n",file,dx,max); fclose(f); return 0; }
  if (dy<1 || dy>max) { fprintf(stderr,"%s image height %d out of range 1-%d\n",file,dy,max); fclose(f); return 0; }
  if (nbp!=1) { fprintf(stderr,"%s bit planes is not 1: %d\n",file,nbp); fclose(f); return 0; }
  if (bpp!=24) { fprintf(stderr,"%s bits per pixel is not 24: %d\n",file,bpp); fclose(f); return 0; }
  if (k!=0) { fprintf(stderr,"%s compressed files not supported\n",file); fclose(f); return 0; }
#ifndef GL_VERSION_2_0
  // OpenGL 2.0 lifts the restriction that texture size must be a power of two
  for (k=1;k<dx;k*=2);
  if (k!=dx) { fprintf(stderr,"%s image width not a power of two: %d\n",file,dx); fclose(f); return 0; }
  for (k=1;k<dy;k*=2);
  if (k!=dy) { fprintf(stderr,"%s image height not a power of two: %d\n",file,dy); fclose(f); return 0; }
#endif
  // Allocate image memory.
  // NOTE(review): assumes rows need no 4-byte padding (3*dx already a
  // multiple of 4) — true for the bundled star.bmp; confirm for other BMPs.
  size = 3*dx*dy;
  image = (unsigned char*) malloc(size);
  if (!image) { fprintf(stderr,"Cannot allocate %d bytes of memory for image %s\n",size,file); fclose(f); return 0; }
  // Seek past the remainder of the header and read the pixel data
  if (fseek(f,20,SEEK_CUR) || fread(image,size,1,f)!=1) {
    fprintf(stderr,"Error reading data from image %s\n",file);
    free(image);
    fclose(f);
    return 0;
  }
  fclose(f);
  // Swap channels in place (BMP stores BGR; GL wants RGB)
  for (k=0;k<size;k+=3)
  {
    unsigned char temp = image[k];
    image[k] = image[k+2];
    image[k+2] = temp;
  }
  // Generate and fill the 2D texture
  glGenTextures(1,&texture);
  glBindTexture(GL_TEXTURE_2D,texture);
  glTexImage2D(GL_TEXTURE_2D,0,3,dx,dy,0,GL_RGB,GL_UNSIGNED_BYTE,image);
  if (glGetError()) fprintf(stderr,"Error in glTexImage2D %s %dx%d\n",file,dx,dy);
  // Scale linearly when image size doesn't match
  glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
  glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
  // Free image memory
  free(image);
  // Return texture name
  return texture;
}
// Per-frame (held-key) keyboard input here; per-keypress input in main()
// Polled (per-frame) keyboard handling: arrow keys orbit the camera, Z/X
// zoom. Each rate global is set every frame — held key wins (left/down/Z take
// precedence over right/up/X), otherwise the rate resets to 0.
void keyboard(const Uint8* state)
{
  //if (state[SDL_SCANCODE_ESCAPE])
  //  quit = true;
  // horizontal orbit rate
  dth = state[SDL_SCANCODE_LEFT] ? -0.75 : (state[SDL_SCANCODE_RIGHT] ? 0.75 : 0);
  // vertical orbit rate
  dph = state[SDL_SCANCODE_DOWN] ? -0.75 : (state[SDL_SCANCODE_UP] ? 0.75 : 0);
  // zoom rate
  dzoom = state[SDL_SCANCODE_Z] ? -0.10 : (state[SDL_SCANCODE_X] ? 0.10 : 0);
}
// all user interaction goes here
// Drain the SDL event queue; returns true when the application should quit.
// Handles one-shot key presses (quit, pause, single-step mode, GPU/CPU
// toggle) and window resizes. Held-key input is polled in keyboard() instead.
bool handleEvents()
{
  SDL_Event event;
  while (SDL_PollEvent(&event))
  {
    switch(event.type)
    {
      case SDL_QUIT:
        return true;
      case SDL_KEYDOWN:
        switch (event.key.keysym.scancode)
        {
          case SDL_SCANCODE_Q:
            return true;
          case SDL_SCANCODE_SPACE:
            Pause = 1 - Pause;      // toggle simulation pause
            break;
          case SDL_SCANCODE_M:
            stepmode = !stepmode;   // toggle single-step mode
            break;
          case SDL_SCANCODE_G:
            gpu = !gpu;             // toggle GPU/CPU stepping
            break;
          default:
            break;
        }
        // BUG FIX: the original fell through into SDL_WINDOWEVENT here,
        // inspecting event.window on a keyboard event.
        break;
      case SDL_WINDOWEVENT:
        if (event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED)
        {
          //cerr << event.window.data1 << " " << event.window.data2 << endl;
          reshape(event.window.data1, event.window.data2);
        }
        break;
    }
  }
  return false;
}
// Program entry point: create the SDL/GL window, allocate and seed host and
// device buffers, load the sprite texture and shaders, then run the render
// loop with physics ticked on a fixed 250 ms accumulator until quit.
// Fix vs. original: the host arrays are allocated with new[] but were
// released with scalar delete — undefined behaviour; now delete[].
int main(int argc, char *argv[])
{
  //SDL Window/OpenGL Context
  SDL_Window* window = NULL;
  SDL_GLContext context;
  //Initialize
  if (init(&window, &context) != true)
  {
    cerr << "Shutting Down\n";
    return 1;
  }
  // host-side mirrors of the velocity grid and particle arrays
  h_gvels = new float[4*M*M*M];
  //h_gtemp = new float[M*M*M];
  verts = new float[3*N];
  pvels = new float[3*N];
  times = new float[N];
  colors= new float[3*N];
  //memset(verts, 0.0, 3*N*sizeof(float));
  //memset(pvels ,0.0, 3*N*sizeof(float));
  // seed particles near the grid centre with tiny random velocities
  for (int i=0; i < 3*N; i += 3) {
    verts[i  ] = 8*((float)rand()/(float)RAND_MAX - 0.5) + M/2;
    verts[i+1] = 8*((float)rand()/(float)RAND_MAX - 0.5) + M/2;
    verts[i+2] = 8*((float)rand()/(float)RAND_MAX - 0.5) + M/2;
    pvels[i  ] = ((float)rand()/(float)RAND_MAX - 0.5)/1000.0;
    pvels[i+1] = ((float)rand()/(float)RAND_MAX - 0.5)/1000.0;
    pvels[i+2] = ((float)rand()/(float)RAND_MAX - 0.5)/1000.0;
    //verts[i] = 0;
    //pvels[i] = 0;
  }
  // stagger initial lifetimes so particles don't all expire together
  for (int i=0; i < N; ++i)
    times[i]= ((float)rand()/(float)RAND_MAX);
    //times[i]= 0;
  //allocate particle and grid arrays on the device
  if(cudaSuccess != cudaMalloc(&dverts, 3*N*sizeof(float))) cout << "failure to allocate\n";
  if(cudaSuccess != cudaMalloc(&dpvels, 3*N*sizeof(float))) cout << "failure to allocate\n";
  if(cudaSuccess != cudaMalloc(&dtimes,   N*sizeof(float))) cout << "failure to allocate\n";
  if(cudaSuccess != cudaMalloc(&dcolors,3*N*sizeof(float))) cout << "failure to allocate\n";
  if(cudaSuccess != cudaMalloc(&d_gvels[0],4*M*M*M*sizeof(float))) cout << "failure to allocate\n";
  if(cudaSuccess != cudaMalloc(&d_gvels[1],4*M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //if(cudaSuccess != cudaMalloc(&d_gtemp[0], M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //if(cudaSuccess != cudaMalloc(&d_gtemp[1], M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //if(cudaSuccess != cudaMalloc(&d_gdens[0], M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //if(cudaSuccess != cudaMalloc(&d_gdens[1], M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //if(cudaSuccess != cudaMalloc(&d_gpres[0], M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //if(cudaSuccess != cudaMalloc(&d_gpres[1], M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //if(cudaSuccess != cudaMalloc(&d_diverge, M*M*M*sizeof(float))) cout << "failure to allocate\n";
  //memset(zeros, 0.0, 4*M*M*M*sizeof(float));
  // zero both velocity grids on the device
  cudaError_t err;
  err = cudaMemcpy(d_gvels[0], zeros, 4*M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  err = cudaMemcpy(d_gvels[1], zeros, 4*M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  //err = cudaMemcpy(d_gtemp[0], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  //err = cudaMemcpy(d_gtemp[1], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  //err = cudaMemcpy(d_gpres[0], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  //err = cudaMemcpy(d_gpres[1], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  //err = cudaMemcpy(d_diverge, zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  //
  //err = cudaMemcpy(d_gdens[0], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  //err = cudaMemcpy(d_gdens[1], zeros, M*M*M*sizeof(float), cudaMemcpyHostToDevice); if (err) cout << "failure to memcpy: " << cudaGetErrorString(err) << endl;
  // upload the initial particle state
  if(cudaSuccess != cudaMemcpy(dverts, verts, 3*N*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail\n";
  if(cudaSuccess != cudaMemcpy(dtimes, times,   N*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail\n";
  if(cudaSuccess != cudaMemcpy(dcolors,colors,3*N*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail\n";
  if(cudaSuccess != cudaMemcpy(dpvels, pvels, 3*N*sizeof(float), cudaMemcpyHostToDevice)) cout << "memcpy fail\n";
  // NOTE(review): err is stale here — it only reflects the d_gvels copies
  // above, not the four particle uploads; the later copies are checked inline.
  if (err) quit = true;
  //////////////////////////////////////////////////////
  starTexture = LoadTexture("star.bmp");
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
  glTexEnvi(GL_TEXTURE_2D, GL_TEXTURE_ENV_MODE, GL_REPLACE);
  //Timing
  int r = 0;       // current tick (ms)
  int dr = 0;      // accumulated unsimulated time (ms)
  int oldr = 0;    // tick at previous frame
  //int Pause = 0;
  int frames = 0;
  //shaders: point-sprite pipeline and (unused) per-pixel lighting
  shader = CreateShaderProgGeom((char*)"flame.vert", (char*)"flame.geom", (char*)"flame.frag");
  pixlight = CreateShaderProg((char*)"pixlight.vert", (char*)"pixlight.frag");
  reshape(w,h);
  int startuptime = SDL_GetTicks();
  oldr = startuptime;
  ////////Main Loop////////
  //bool quit = false;
  try {
    while (!quit)
    {
      //cout << "handling events\n";
      quit = handleEvents();
      ////Physics Timing////
      r = SDL_GetTicks();
      dr += r - oldr;
      while (dr >= 250)
      {
        // fixed 250 ms physics steps; loop catches up after slow frames
        physics(r);
        dr -= 250;
      }
      oldr = r;
      display(window, r);
      frames += 1;
      //quit = true;
    }
  }
  catch (...) {cout << "catch block\n";}
  cout << "Shutting Down\n";
  cout << "average framerate: " << 1000*(float)frames/(r - startuptime) << endl;
  cudaFree(dverts);
  cudaFree(dpvels);
  cudaFree(dtimes);
  cudaFree(dcolors);
  cudaFree(d_gvels[0]);
  cudaFree(d_gvels[1]);
  //cudaFree(d_gtemp[0]);
  //cudaFree(d_gtemp[1]);
  //cudaFree(d_gdens[0]);
  //cudaFree(d_gdens[1]);
  //cudaFree(d_gpres[0]);
  //cudaFree(d_gpres[1]);
  //cudaFree(d_diverge);
  // arrays came from new[]: scalar delete here was undefined behaviour
  delete[] verts;
  delete[] pvels;
  delete[] times;
  delete[] colors;
  delete[] h_gvels;
  //delete h_gtemp;
  SDL_Quit();
  return 0;
}
|
2bbd7b14dd5196a81d579466dd4184eb8d631173.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N 16
#define RADIUS 1
#define BLOCK_SIZE 16
#include <float.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//GPU dilation (max-filter) algorithm
// fill the vector with random values < N
// Fill im[0..n-1] with random grey values in [0, N), leaving a RADIUS-wide
// zero border at both ends (the halo for the dilation stencil). The first
// call latches a random seed into a static, and every call re-seeds with it,
// so repeated calls within one process draw the same image.
// NOTE(review): uses time(NULL) but <time.h> is not included in this file —
// it compiles only via transitive includes; confirm and add the include.
void draw_random(float *im, int n) {
  // initialize random generator with a random seed only once
  srand((unsigned int)time(NULL));
  static int seed = rand();
  // use same seed to draw the same image again on every test
  srand(seed);
  for (int i = 0; i<RADIUS; i++)
    im[i] = im[n-i-1] = 0;
  for (int i = RADIUS; i < n - RADIUS; i++) {
    im[i] = (float) (rand() % N) ;
  }
}
// print the vector
// Print the n values of img on a single line, two characters wide each.
void disp_img(float const *img, int n) {
  for (int col = 0; col < n; col++)
    printf("%2.0f ", img[col]);
  printf("\n");
}
// 1-D grayscale dilation (max filter) of radius RADIUS over BLOCK_SIZE-wide
// tiles staged in shared memory. Launch contract: 1-D grid with
// blockDim.x == BLOCK_SIZE; 'in'/'out' point at the first non-halo element
// and have RADIUS valid halo elements on each side of the grid's range.
// (This file is the hipify-generated twin of the CUDA version below.)
__global__ void kernel(float *in, float *out) {
  __shared__ float temp[BLOCK_SIZE + 2 * RADIUS];
  int gindex = threadIdx.x + blockIdx.x * blockDim.x;  // global element
  int lindex = threadIdx.x + RADIUS;                   // slot in the tile
  // Read input elements into shared memory
  temp[lindex] = in[gindex];
  // the first RADIUS threads also fetch the left and right halos
  if (threadIdx.x < RADIUS) {
    temp[lindex - RADIUS] = in[gindex - RADIUS];
    temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
  }
  // Synchronize (ensure all the data is available)
  __syncthreads();
  // Apply the max stencil over the (2*RADIUS+1)-wide window
  float result = in[gindex];
  for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
    result = fmaxf(result, temp[lindex + offset]);
  // Store the result
  out[gindex] = result;
}
// Host driver (hipify-generated): build a random 1-D image with halos,
// dilate it on the GPU, and print input and output.
// NOTE(review): no error checking on any HIP call, and the halo cells of
// d_out are never written by the kernel, so disp_img prints indeterminate
// values at both ends of the output.
int main(void) {
  float *in, *out;      // host buffers (image plus halos)
  float *d_in, *d_out;  // device buffers
  size_t size = (N + 2*RADIUS) * sizeof(float);
  // Alloc space for host copies and setup values
  in = (float*) malloc(size);
  out = (float*) malloc(size);
  draw_random(in, N + 2*RADIUS);
  disp_img(in, N + 2*RADIUS);
  // Alloc space for device copies
  hipMalloc((void **)&d_in, size);
  hipMalloc((void **)&d_out, size);
  // Copy to device
  hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
  // Launch kernel on GPU; pointers are offset past the left halo
  hipLaunchKernelGGL(( kernel), dim3(N/BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, d_in+RADIUS, d_out+RADIUS);
  // Copy result back to host
  hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost);
  disp_img(out, N + 2*RADIUS);
  // Cleanup
  free(in); free(out);
  hipFree(d_in); hipFree(d_out);
  return 0;
}
| 2bbd7b14dd5196a81d579466dd4184eb8d631173.cu | #define N 16
#define RADIUS 1
#define BLOCK_SIZE 16
#include <float.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//GPU dilation (max-filter) algorithm
// fill the vector with random values < N
// Fill im[0..n-1] with random values in [0, N); the outermost RADIUS cells on
// each side stay 0 (the halo for the dilation stencil). A static seed makes
// every call within one process reproduce the same image.
void draw_random(float *im, int n) {
  // latch one random seed per process, then always re-seed with it
  srand((unsigned int)time(NULL));
  static int seed = rand();
  srand(seed);
  // zero the borders from both ends inwards
  int left = 0;
  int right = n - 1;
  while (left < RADIUS) {
    im[left] = 0;
    im[right] = 0;
    ++left;
    --right;
  }
  // random interior values
  for (int i = RADIUS; i + RADIUS < n; ++i)
    im[i] = (float)(rand() % N);
}
// print the vector
// Print the n values of img on a single line, two characters wide each.
void disp_img(float const *img, int n) {
  int col;
  for (col = 0; col < n; ++col)
    printf("%2.0f ", img[col]);
  putchar('\n');
}
// 1-D grayscale dilation (max filter) of radius RADIUS over BLOCK_SIZE-wide
// tiles staged in shared memory. Launch contract: 1-D grid with
// blockDim.x == BLOCK_SIZE; 'in'/'out' point at the first non-halo element
// and have RADIUS valid halo elements on each side of the grid's range
// (guaranteed by the offset pointers passed from main).
__global__ void kernel(float *in, float *out) {
  __shared__ float temp[BLOCK_SIZE + 2 * RADIUS];
  int gindex = threadIdx.x + blockIdx.x * blockDim.x;  // global element
  int lindex = threadIdx.x + RADIUS;                   // slot in the tile
  // Read input elements into shared memory
  temp[lindex] = in[gindex];
  // the first RADIUS threads also fetch the left and right halos
  if (threadIdx.x < RADIUS) {
    temp[lindex - RADIUS] = in[gindex - RADIUS];
    temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
  }
  // Synchronize (ensure all the data is available)
  __syncthreads();
  // Apply the max stencil over the (2*RADIUS+1)-wide window
  float result = in[gindex];
  for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
    result = fmaxf(result, temp[lindex + offset]);
  // Store the result
  out[gindex] = result;
}
// Host driver: build a random 1-D image with halos, dilate it on the GPU,
// and print input and output.
// Fixes vs. original: every CUDA call is now error-checked (failures were
// silent), and d_out is zero-initialised so the halo cells — which the kernel
// never writes — print as 0 instead of indeterminate garbage.
int main(void) {
  float *in, *out;      // host buffers (image plus halos)
  float *d_in, *d_out;  // device buffers
  size_t size = (N + 2*RADIUS) * sizeof(float);
  cudaError_t err;
  // Alloc space for host copies and setup values
  in = (float*) malloc(size);
  out = (float*) malloc(size);
  if (!in || !out) { fprintf(stderr, "host allocation failed\n"); return 1; }
  draw_random(in, N + 2*RADIUS);
  disp_img(in, N + 2*RADIUS);
  // Alloc space for device copies
  err = cudaMalloc((void **)&d_in, size);
  if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc d_in: %s\n", cudaGetErrorString(err)); return 1; }
  err = cudaMalloc((void **)&d_out, size);
  if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc d_out: %s\n", cudaGetErrorString(err)); return 1; }
  // zero the output so the halo cells (never written by the kernel) are defined
  err = cudaMemset(d_out, 0, size);
  if (err != cudaSuccess) { fprintf(stderr, "cudaMemset d_out: %s\n", cudaGetErrorString(err)); return 1; }
  // Copy to device
  err = cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
  if (err != cudaSuccess) { fprintf(stderr, "cudaMemcpy H2D: %s\n", cudaGetErrorString(err)); return 1; }
  // Launch kernel on GPU; pointers are offset past the left halo
  kernel<<<N/BLOCK_SIZE,BLOCK_SIZE>>>(d_in+RADIUS, d_out+RADIUS);
  err = cudaGetLastError();
  if (err != cudaSuccess) { fprintf(stderr, "kernel launch: %s\n", cudaGetErrorString(err)); return 1; }
  // Copy result back to host (synchronises with the kernel)
  err = cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
  if (err != cudaSuccess) { fprintf(stderr, "cudaMemcpy D2H: %s\n", cudaGetErrorString(err)); return 1; }
  disp_img(out, N + 2*RADIUS);
  // Cleanup
  free(in); free(out);
  cudaFree(d_in); cudaFree(d_out);
  return 0;
}
|
092b1016bf98cdd6023516f589162aa375822e80.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/SpatialConvolutionMM.hip"
#else
#include <ATen/div_rtn.h>
// Validate shapes and arguments for SpatialConvolutionMM forward/backward.
// input: non-empty 3D (C,H,W) or 4D (N,C,H,W) tensor (only the batch dim may
// be 0); weight: 2D [nOut, nIn*kH*kW] or 4D [nOut, nIn, kH, kW], nullable
// only when weight_nullable is set; bias: 1D of length nOut; gradOutput,
// when given, must match the computed output size exactly.
// Aborts via THError/THArgCheck on any violation.
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
                         THCState *state,
                         THCTensor *input, THCTensor *gradOutput,
                         THCTensor *weight, THCTensor *bias,
                         int kH, int kW, int dH, int dW, int padH, int padW,
                         int weight_nullable) {
  THArgCheck(kW > 0 && kH > 0, 9,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
  if (weight != NULL) {
    THCUNN_argCheck(state, !weight->is_empty() && (weight->dim() == 2 || weight->dim() == 4), 5, weight,
                    "non-empty 2D or 4D weight tensor expected, but got: %s");
    if (bias != NULL) {
      THCUNN_check_dim_size(state, bias, 1, 0, weight->size(0));
    }
  } else if (!weight_nullable) {
    THError("weight tensor is expected to be non-nullable");
  }
  // dimension indices for channel/height/width; shifted right for batched input
  int ndim = input->dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;
  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }
  // Allow for empty batch size but not other dimensions
  bool valid_empty = false;
  if (ndim == 3) {
    valid_empty = input->size(0) == 0 && input->size(1) != 0 && input->size(2) != 0;
  } else if (ndim == 4) {
    valid_empty = input->size(0) == 0 && input->size(1) != 0 && input->size(2) != 0 && input->size(3) != 0;
  }
  THCUNN_argCheck(state, (!input->is_empty() || valid_empty) && (ndim == 3 || ndim == 4), 2, input,
                  "non-empty 3D or 4D input tensor expected but got: %s");
  int64_t inputHeight  = input->size(dimh);
  int64_t inputWidth   = input->size(dimw);
  // the padded input must be at least as large as the kernel
  int64_t exactInputHeight = inputHeight + 2 * padH;
  int64_t exactInputWidth = inputWidth + 2 * padW;
  if (exactInputHeight < kH || exactInputWidth < kW) {
    THError("Calculated padded input size per channel: (%ld x %ld). "
            "Kernel size: (%d x %d). Kernel size can't be greater than actual input size",
            exactInputHeight, exactInputWidth, kH, kW);
  }
  // standard conv output size: floor((in + 2*pad - k) / stride) + 1
  int64_t outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1;
  int64_t outputWidth  = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1;
  if (outputWidth < 1 || outputHeight < 1) {
    THError("Given input size per channel: (%ld x %ld). "
            "Calculated output size per channel: (%ld x %ld). Output size is too small",
            inputHeight, inputWidth, outputHeight, outputWidth);
  }
  if (weight != NULL) {
    // for 2D (flattened) weights, recover nInputPlane from the kH*kW factor
    int64_t nInputPlane = weight->size(1);
    if (weight->dim() == 2) {
      nInputPlane /= (kH * kW);
    }
    THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane);
  }
  if (gradOutput != NULL) {
    if (weight != NULL) {
      int64_t nOutputPlane = weight->size(0);
      THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
    } else if (bias != NULL) {
      int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
      THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
    }
    THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
  }
}
// Return a contiguous 2D view of `weight` suitable for the MM-based convolution.
// A 4D weight [nOut, nIn, kH, kW] is reshaped to [nOut, nIn*kH*kW]; a 2D weight
// passes through unchanged. Always returns a new reference the caller must free.
static THCTensor* THNN_(newViewWeightMM2d)(THCState *state, THCTensor *weight) {
weight = THCTensor_(newContiguous)(state, weight);
if (weight->dim() != 4) {
return weight;
}
const int64_t rows = weight->size(0);
const int64_t cols = weight->size(1) * weight->size(2) * weight->size(3);
// Build the flattened view on top of the contiguous tensor's storage, then
// drop the intermediate reference.
THCTensor *contig = weight;
weight = THCTensor_(newWithStorage2d)(state, THTensor_getStoragePtr(contig), contig->storage_offset(),
rows, -1, cols, -1);
THCTensor_(free)(state, contig);
return weight;
}
// Forward pass of 2D convolution via im2col + GEMM, one batch element at a time:
// output_n = weight[nOut, nIn*kH*kW] * columns[nIn*kH*kW, oH*oW] (+ bias broadcast).
// `columns` and `ones` are caller-provided scratch buffers resized here as needed.
// Accepts 3D (C,H,W) or 4D (N,C,H,W) input; 3D input is temporarily promoted to
// batch size 1 and restored before returning. Dilation is fixed at 1.
void THNN_(SpatialConvolutionMM_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones);
if (bias) {
THCUNN_assertSameGPU(state, 2, weight, bias);
}
// New reference to a 2D view of the weight; released at the end of this function.
weight = THNN_(newViewWeightMM2d)(state, weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, 0);
THArgCheck(!bias || THCTensor_(isContiguous)(state, bias), 5,
"bias tensor has to be contiguous");
// Dimension indices shift by one when a batch dimension is present.
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
int64_t nInputPlane = input->size(dimf);
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t nOutputPlane = weight->size(0);
// Standard convolution output-size formula (dilation = 1).
int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
input = THCTensor_(newContiguous)(state, input);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// we can bypass im2col in this case
// (1x1 kernel, unit stride, no padding: the input already has the column-matrix
// layout, so the GEMM below can read input_n directly instead of `columns`)
bool direct_gemm = kW == 1 && kH == 1 && dW == 1 && dH == 1 && padW == 0 && padH == 0;
if (!direct_gemm) {
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
}
if (bias) {
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
}
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *output_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix mulitply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t n_ = outputHeight * outputWidth;
int64_t k_ = 1;
// beta = 0 overwrites output_n; flipped to 1 below when the bias GEMM has
// already written into output_n so the main GEMM accumulates on top of it.
scalar_t beta = ScalarConvert<int, scalar_t>::to(0);
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
// Rank-1 product ones[oH*oW,1] x bias[1,nOut]: broadcasts bias over every
// spatial position of output_n.
if (bias) {
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n_, m_, k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, ones), k_,
THCTensor_(data)(state, bias), k_,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, output_n), n_
);
beta = ScalarConvert<int, scalar_t>::to(1);
}
if (!direct_gemm) {
// Extract columns:
at::native::im2col<scalar_t>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1,
columns->data<scalar_t>()
);
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = outputHeight*outputWidth;
int64_t k = nInputPlane*kH*kW;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 'n',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, direct_gemm ? input_n : columns), n,
THCTensor_(data)(state, weight), k,
beta,
THCTensor_(data)(state, output_n), n
);
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, output_n);
// Resize output
// Restore the original non-batched shapes if the input came in as 3D.
if (is_batch == 0) {
THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
}
// Backward pass w.r.t. the input: for each batch element computes
// gradColumns = weight^T * gradOutput_n via GEMM, then scatter-adds the columns
// back into image layout with col2im to produce gradInput_n.
// `ones` is unused here but kept in the signature for API symmetry.
void THNN_(SpatialConvolutionMM_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradColumns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
// New reference to a 2D view of the weight; released before returning.
weight = THNN_(newViewWeightMM2d)(state, weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, 0);
// Params
// A 2D weight stores nIn*kH*kW flattened in dim 1; undo that to get nInputPlane.
int nInputPlane = weight->dim() == 2 ? weight->size(1)/(kW*kH) : weight->size(1);
int nOutputPlane = weight->size(0);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
// Must match the output-size formula used in the forward pass (dilation = 1).
int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCTensor_(resize2d)(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *gradInput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix mulitply per sample:
THCTensor_(select)(state, gradInput_n, gradInput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nInputPlane*kW*kH;
int64_t n = gradColumns->size(1);
int64_t k = nOutputPlane;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
// gradColumns = weight^T('t') x gradOutput_n('n'); beta = 0 overwrites.
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 't',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradOutput_n), n,
THCTensor_(data)(state, weight), m,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, gradColumns), n
);
// Unpack columns back into input:
at::native::col2im<scalar_t, accreal>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, gradColumns),
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, gradInput_n)
);
}
// Free
THCTensor_(free)(state, gradInput_n);
THCTensor_(free)(state, gradOutput_n);
THCTensor_(free)(state, weight);
// Resize output
// Restore the original non-batched shapes if the input came in as 3D.
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
// Backward pass w.r.t. the parameters. Accumulates (does not overwrite):
//   gradWeight += scale * gradOutput_n * im2col(input_n)^T   (GEMM per element)
//   gradBias   += scale * gradOutput_n * ones                (GEMV; GEMM for half)
// Either gradWeight or gradBias may be NULL to skip that accumulation
// (shape check is invoked with weight_nullable = 1).
void THNN_(SpatialConvolutionMM_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
accreal scale_) {
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, gradBias, columns, ones);
if (gradWeight) {
THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous");
// New reference to a 2D view; released before returning.
gradWeight = THNN_(newViewWeightMM2d)(state, gradWeight);
}
if (gradBias) {
THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous");
THArgCheck(THCTensor_(isContiguous)(state, ones), 7, "ones needs to be contiguous");
}
THNN_(SpatialConvolutionMM_shapeCheck)
(state, input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, 1);
// Params
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t nInputPlane = input->size(1);
int64_t nOutputPlane = gradOutput->size(1);
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
// Must match the output-size formula used in the forward pass (dilation = 1).
int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Define a buffer of ones, for bias accumulation
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix mulitply per output:
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// Do Weight:
if (gradWeight) {
// Matrix mulitply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
// Extract columns:
at::native::im2col<scalar_t>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1,
columns->data<scalar_t>()
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = nInputPlane*kW*kH;
int64_t k = columns->size(1);
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
// alpha = scale, beta = 1: accumulate the scaled per-element contribution
// into gradWeight across all batch elements.
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n, m, k,
scale,
THCTensor_(data)(state, columns), k,
THCTensor_(data)(state, gradOutput_n), k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradWeight), n
);
}
// Do Bias:
if (gradBias) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
// float/double use GEMV; half has no GEMV here, so a degenerate GEMM
// (one output column) is used instead below.
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(
#endif
state,
't',
k_, m_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), 1,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), 1
);
#endif
#ifdef THC_REAL_IS_HALF
THCudaBlas_Hgemm(
state,
't', 'n',
m_, 1, k_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), m_
);
#endif
}
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, gradOutput_n);
if (gradWeight)
THCTensor_(free)(state, gradWeight);
// Resize
// Restore the original non-batched shapes if the input came in as 3D.
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
| 092b1016bf98cdd6023516f589162aa375822e80.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialConvolutionMM.cu"
#else
#include <ATen/div_rtn.h>
// Validates kernel/stride parameters and the geometry of input, gradOutput,
// weight and bias for SpatialConvolutionMM; raises via THArgCheck/THError on
// any mismatch. `weight` may be NULL only when `weight_nullable` is nonzero
// (used by accGradParameters, where gradWeight can be absent).
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
THCTensor *weight, THCTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW,
int weight_nullable) {
THArgCheck(kW > 0 && kH > 0, 9,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
if (weight != NULL) {
THCUNN_argCheck(state, !weight->is_empty() && (weight->dim() == 2 || weight->dim() == 4), 5, weight,
"non-empty 2D or 4D weight tensor expected, but got: %s");
if (bias != NULL) {
// Bias length must match the number of output planes (weight dim 0).
THCUNN_check_dim_size(state, bias, 1, 0, weight->size(0));
}
} else if (!weight_nullable) {
THError("weight tensor is expected to be non-nullable");
}
// Dimension indices shift by one when a batch dimension is present.
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
// Allow for empty batch size but not other dimensions
bool valid_empty = false;
if (ndim == 3) {
valid_empty = input->size(0) == 0 && input->size(1) != 0 && input->size(2) != 0;
} else if (ndim == 4) {
valid_empty = input->size(0) == 0 && input->size(1) != 0 && input->size(2) != 0 && input->size(3) != 0;
}
THCUNN_argCheck(state, (!input->is_empty() || valid_empty) && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t exactInputHeight = inputHeight + 2 * padH;
int64_t exactInputWidth = inputWidth + 2 * padW;
if (exactInputHeight < kH || exactInputWidth < kW) {
THError("Calculated padded input size per channel: (%ld x %ld). "
"Kernel size: (%d x %d). Kernel size can't be greater than actual input size",
exactInputHeight, exactInputWidth, kH, kW);
}
// div_rtn: integer division rounding toward negative infinity (ATen/div_rtn.h).
int64_t outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1;
int64_t outputWidth = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1;
if (outputWidth < 1 || outputHeight < 1) {
THError("Given input size per channel: (%ld x %ld). "
"Calculated output size per channel: (%ld x %ld). Output size is too small",
inputHeight, inputWidth, outputHeight, outputWidth);
}
if (weight != NULL) {
// A 2D weight stores nIn*kH*kW flattened in dim 1; undo that to get nInputPlane.
int64_t nInputPlane = weight->size(1);
if (weight->dim() == 2) {
nInputPlane /= (kH * kW);
}
THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane);
}
if (gradOutput != NULL) {
if (weight != NULL) {
int64_t nOutputPlane = weight->size(0);
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
} else if (bias != NULL) {
int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
}
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
}
}
// Return a contiguous 2D view of `weight` suitable for the MM-based convolution.
// A 4D weight [nOut, nIn, kH, kW] is reshaped to [nOut, nIn*kH*kW]; a 2D weight
// passes through unchanged. Always returns a new reference the caller must free.
static THCTensor* THNN_(newViewWeightMM2d)(THCState *state, THCTensor *weight) {
weight = THCTensor_(newContiguous)(state, weight);
if (weight->dim() != 4) {
return weight;
}
const int64_t rows = weight->size(0);
const int64_t cols = weight->size(1) * weight->size(2) * weight->size(3);
// Build the flattened view on top of the contiguous tensor's storage, then
// drop the intermediate reference.
THCTensor *contig = weight;
weight = THCTensor_(newWithStorage2d)(state, THTensor_getStoragePtr(contig), contig->storage_offset(),
rows, -1, cols, -1);
THCTensor_(free)(state, contig);
return weight;
}
// Forward pass of 2D convolution via im2col + GEMM, one batch element at a time:
// output_n = weight[nOut, nIn*kH*kW] * columns[nIn*kH*kW, oH*oW] (+ bias broadcast).
// `columns` and `ones` are caller-provided scratch buffers resized here as needed.
// Accepts 3D (C,H,W) or 4D (N,C,H,W) input; 3D input is temporarily promoted to
// batch size 1 and restored before returning. Dilation is fixed at 1.
void THNN_(SpatialConvolutionMM_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones);
if (bias) {
THCUNN_assertSameGPU(state, 2, weight, bias);
}
// New reference to a 2D view of the weight; released at the end of this function.
weight = THNN_(newViewWeightMM2d)(state, weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, 0);
THArgCheck(!bias || THCTensor_(isContiguous)(state, bias), 5,
"bias tensor has to be contiguous");
// Dimension indices shift by one when a batch dimension is present.
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
int64_t nInputPlane = input->size(dimf);
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t nOutputPlane = weight->size(0);
// Standard convolution output-size formula (dilation = 1).
int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
input = THCTensor_(newContiguous)(state, input);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// we can bypass im2col in this case
// (1x1 kernel, unit stride, no padding: the input already has the column-matrix
// layout, so the GEMM below can read input_n directly instead of `columns`)
bool direct_gemm = kW == 1 && kH == 1 && dW == 1 && dH == 1 && padW == 0 && padH == 0;
if (!direct_gemm) {
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
}
if (bias) {
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
}
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *output_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix mulitply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t n_ = outputHeight * outputWidth;
int64_t k_ = 1;
// beta = 0 overwrites output_n; flipped to 1 below when the bias GEMM has
// already written into output_n so the main GEMM accumulates on top of it.
scalar_t beta = ScalarConvert<int, scalar_t>::to(0);
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
// Rank-1 product ones[oH*oW,1] x bias[1,nOut]: broadcasts bias over every
// spatial position of output_n.
if (bias) {
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n_, m_, k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, ones), k_,
THCTensor_(data)(state, bias), k_,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, output_n), n_
);
beta = ScalarConvert<int, scalar_t>::to(1);
}
if (!direct_gemm) {
// Extract columns:
at::native::im2col<scalar_t>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1,
columns->data<scalar_t>()
);
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = outputHeight*outputWidth;
int64_t k = nInputPlane*kH*kW;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 'n',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, direct_gemm ? input_n : columns), n,
THCTensor_(data)(state, weight), k,
beta,
THCTensor_(data)(state, output_n), n
);
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, output_n);
// Resize output
// Restore the original non-batched shapes if the input came in as 3D.
if (is_batch == 0) {
THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
}
// Backward pass w.r.t. the input: for each batch element computes
// gradColumns = weight^T * gradOutput_n via GEMM, then scatter-adds the columns
// back into image layout with col2im to produce gradInput_n.
// `ones` is unused here but kept in the signature for API symmetry.
void THNN_(SpatialConvolutionMM_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradColumns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
// New reference to a 2D view of the weight; released before returning.
weight = THNN_(newViewWeightMM2d)(state, weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, 0);
// Params
// A 2D weight stores nIn*kH*kW flattened in dim 1; undo that to get nInputPlane.
int nInputPlane = weight->dim() == 2 ? weight->size(1)/(kW*kH) : weight->size(1);
int nOutputPlane = weight->size(0);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
// Must match the output-size formula used in the forward pass (dilation = 1).
int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCTensor_(resize2d)(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *gradInput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix mulitply per sample:
THCTensor_(select)(state, gradInput_n, gradInput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nInputPlane*kW*kH;
int64_t n = gradColumns->size(1);
int64_t k = nOutputPlane;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
// gradColumns = weight^T('t') x gradOutput_n('n'); beta = 0 overwrites.
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 't',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradOutput_n), n,
THCTensor_(data)(state, weight), m,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, gradColumns), n
);
// Unpack columns back into input:
at::native::col2im<scalar_t, accreal>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, gradColumns),
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, gradInput_n)
);
}
// Free
THCTensor_(free)(state, gradInput_n);
THCTensor_(free)(state, gradOutput_n);
THCTensor_(free)(state, weight);
// Resize output
// Restore the original non-batched shapes if the input came in as 3D.
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
// Backward pass w.r.t. the parameters. Accumulates (does not overwrite):
//   gradWeight += scale * gradOutput_n * im2col(input_n)^T   (GEMM per element)
//   gradBias   += scale * gradOutput_n * ones                (GEMV; GEMM for half)
// Either gradWeight or gradBias may be NULL to skip that accumulation
// (shape check is invoked with weight_nullable = 1).
void THNN_(SpatialConvolutionMM_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
accreal scale_) {
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, gradBias, columns, ones);
if (gradWeight) {
THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous");
// New reference to a 2D view; released before returning.
gradWeight = THNN_(newViewWeightMM2d)(state, gradWeight);
}
if (gradBias) {
THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous");
THArgCheck(THCTensor_(isContiguous)(state, ones), 7, "ones needs to be contiguous");
}
THNN_(SpatialConvolutionMM_shapeCheck)
(state, input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, 1);
// Params
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t nInputPlane = input->size(1);
int64_t nOutputPlane = gradOutput->size(1);
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
// Must match the output-size formula used in the forward pass (dilation = 1).
int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Define a buffer of ones, for bias accumulation
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix mulitply per output:
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// Do Weight:
if (gradWeight) {
// Matrix mulitply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
// Extract columns:
at::native::im2col<scalar_t>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1,
columns->data<scalar_t>()
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = nInputPlane*kW*kH;
int64_t k = columns->size(1);
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
// alpha = scale, beta = 1: accumulate the scaled per-element contribution
// into gradWeight across all batch elements.
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n, m, k,
scale,
THCTensor_(data)(state, columns), k,
THCTensor_(data)(state, gradOutput_n), k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradWeight), n
);
}
// Do Bias:
if (gradBias) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
// float/double use GEMV; half has no GEMV here, so a degenerate GEMM
// (one output column) is used instead below.
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(
#endif
state,
't',
k_, m_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), 1,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), 1
);
#endif
#ifdef THC_REAL_IS_HALF
THCudaBlas_Hgemm(
state,
't', 'n',
m_, 1, k_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), m_
);
#endif
}
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, gradOutput_n);
if (gradWeight)
THCTensor_(free)(state, gradWeight);
// Resize
// Restore the original non-batched shapes if the input came in as 3D.
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
|
a9ff96ec76e193d87bdd75567558eaaf0c384d2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define SCALEDOWN_W 160
// Downscale d_Data by 2x in each direction into d_Result using a separable
// 5-tap symmetric filter with coefficients k[0..2] taken from d_Kernel1
// (declared elsewhere — presumably __constant__ filter weights; confirm).
// Each block handles a SCALEDOWN_W x SCALEDOWN_H input tile (SCALEDOWN_H is
// defined elsewhere in the file); pitch/newpitch are row strides in elements.
// Border handling clamps reads to the image edge. The row loop processes 5
// input rows per iteration, rotating them through the 5 banks of `brow`
// (horizontally filtered half-width rows); each vertical output combines 5
// consecutive banks and is emitted only for even absolute rows (dy parity
// checks), giving the 2x vertical decimation.
__global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
__shared__ float inrow[SCALEDOWN_W+4];
__shared__ float brow[5*(SCALEDOWN_W/2)];
__shared__ int yRead[SCALEDOWN_H+4];
__shared__ int yWrite[SCALEDOWN_H+4];
#define dx2 (SCALEDOWN_W/2)
const int tx = threadIdx.x;
// tx0..tx4 index this thread's slot in each of the 5 rotating brow banks.
const int tx0 = tx + 0*dx2;
const int tx1 = tx + 1*dx2;
const int tx2 = tx + 2*dx2;
const int tx3 = tx + 3*dx2;
const int tx4 = tx + 4*dx2;
const int xStart = blockIdx.x*SCALEDOWN_W;
const int yStart = blockIdx.y*SCALEDOWN_H;
const int xWrite = xStart/2 + tx;
const float *k = d_Kernel1;
// Precompute clamped source-row offsets and half-resolution destination-row
// offsets for the whole tile (one entry per thread, first SCALEDOWN_H+4 threads).
if (tx<SCALEDOWN_H+4) {
int y = yStart + tx - 1;
y = (y<0 ? 0 : y);
y = (y>=height ? height-1 : y);
yRead[tx] = y*pitch;
yWrite[tx] = (yStart + tx - 4)/2 * newpitch;
}
__syncthreads();
// Column index clamped to the image, with a 2-pixel left apron for the filter.
int xRead = xStart + tx - 2;
xRead = (xRead<0 ? 0 : xRead);
xRead = (xRead>=width ? width-1 : xRead);
// The inner `if (dy<...)` / parity conditions depend only on the uniform loop
// variable dy, so the __syncthreads() inside them are reached by all threads.
for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) {
inrow[tx] = d_Data[yRead[dy+0] + xRead];
__syncthreads();
// Horizontal 5-tap filter at even columns -> half-width row in bank 0.
if (tx<dx2)
brow[tx0] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
// Vertical 5-tap filter across the 5 banks; written only on alternate rows.
if (tx<dx2 && dy>=4 && !(dy&1))
d_Result[yWrite[dy+0] + xWrite] = k[2]*brow[tx2] + k[0]*(brow[tx0]+brow[tx4]) + k[1]*(brow[tx1]+brow[tx3]);
if (dy<(SCALEDOWN_H+3)) {
inrow[tx] = d_Data[yRead[dy+1] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx1] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1))
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1))
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1))
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && !(dy&1))
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
__syncthreads();
}
} | a9ff96ec76e193d87bdd75567558eaaf0c384d2c.cu | #define SCALEDOWN_W 160
// Downscale d_Data by 2x in each direction into d_Result using a separable
// 5-tap symmetric filter with coefficients k[0..2] taken from d_Kernel1
// (declared elsewhere — presumably __constant__ filter weights; confirm).
// Each block handles a SCALEDOWN_W x SCALEDOWN_H input tile (SCALEDOWN_H is
// defined elsewhere in the file); pitch/newpitch are row strides in elements.
// Border handling clamps reads to the image edge. The row loop processes 5
// input rows per iteration, rotating them through the 5 banks of `brow`
// (horizontally filtered half-width rows); each vertical output combines 5
// consecutive banks and is emitted only for even absolute rows (dy parity
// checks), giving the 2x vertical decimation.
__global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
__shared__ float inrow[SCALEDOWN_W+4];
__shared__ float brow[5*(SCALEDOWN_W/2)];
__shared__ int yRead[SCALEDOWN_H+4];
__shared__ int yWrite[SCALEDOWN_H+4];
#define dx2 (SCALEDOWN_W/2)
const int tx = threadIdx.x;
// tx0..tx4 index this thread's slot in each of the 5 rotating brow banks.
const int tx0 = tx + 0*dx2;
const int tx1 = tx + 1*dx2;
const int tx2 = tx + 2*dx2;
const int tx3 = tx + 3*dx2;
const int tx4 = tx + 4*dx2;
const int xStart = blockIdx.x*SCALEDOWN_W;
const int yStart = blockIdx.y*SCALEDOWN_H;
const int xWrite = xStart/2 + tx;
const float *k = d_Kernel1;
// Precompute clamped source-row offsets and half-resolution destination-row
// offsets for the whole tile (one entry per thread, first SCALEDOWN_H+4 threads).
if (tx<SCALEDOWN_H+4) {
int y = yStart + tx - 1;
y = (y<0 ? 0 : y);
y = (y>=height ? height-1 : y);
yRead[tx] = y*pitch;
yWrite[tx] = (yStart + tx - 4)/2 * newpitch;
}
__syncthreads();
// Column index clamped to the image, with a 2-pixel left apron for the filter.
int xRead = xStart + tx - 2;
xRead = (xRead<0 ? 0 : xRead);
xRead = (xRead>=width ? width-1 : xRead);
// The inner `if (dy<...)` / parity conditions depend only on the uniform loop
// variable dy, so the __syncthreads() inside them are reached by all threads.
for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) {
inrow[tx] = d_Data[yRead[dy+0] + xRead];
__syncthreads();
// Horizontal 5-tap filter at even columns -> half-width row in bank 0.
if (tx<dx2)
brow[tx0] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
// Vertical 5-tap filter across the 5 banks; written only on alternate rows.
if (tx<dx2 && dy>=4 && !(dy&1))
d_Result[yWrite[dy+0] + xWrite] = k[2]*brow[tx2] + k[0]*(brow[tx0]+brow[tx4]) + k[1]*(brow[tx1]+brow[tx3]);
if (dy<(SCALEDOWN_H+3)) {
inrow[tx] = d_Data[yRead[dy+1] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx1] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1))
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1))
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1))
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && !(dy&1))
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
__syncthreads();
}
} |
4749d4cd3712de816811f6adaaf61f5e795074cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <iostream>
namespace raft {
namespace sparse {
/**************************** sorted COO to CSR ****************************/
template <typename T>
struct SparseConvertCSRInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SparseConvertCSRInputs<T>& dims)
{
return os;
}
template <typename T>
class SparseConvertCSRTest : public ::testing::TestWithParam<SparseConvertCSRInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseConvertCSRInputs<T> params;
};
const std::vector<SparseConvertCSRInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef SparseConvertCSRTest<float> SortedCOOToCSR;
TEST_P(SortedCOOToCSR, Result)
{
hipStream_t stream;
hipStreamCreate(&stream);
int nnz = 8;
int* in_h = new int[nnz]{0, 0, 1, 1, 2, 2, 3, 3};
int* exp_h = new int[4]{0, 2, 4, 6};
rmm::device_uvector<int> in(nnz, stream);
rmm::device_uvector<int> exp(4, stream);
rmm::device_uvector<int> out(4, stream);
RAFT_CUDA_TRY(hipMemsetAsync(in.data(), 0, in.size() * sizeof(int), stream));
RAFT_CUDA_TRY(hipMemsetAsync(exp.data(), 0, exp.size() * sizeof(int), stream));
RAFT_CUDA_TRY(hipMemsetAsync(out.data(), 0, out.size() * sizeof(int), stream));
raft::update_device(in.data(), in_h, nnz, stream);
raft::update_device(exp.data(), exp_h, 4, stream);
convert::sorted_coo_to_csr<int>(in.data(), nnz, out.data(), 4, stream);
ASSERT_TRUE(raft::devArrMatch<int>(out.data(), exp.data(), 4, raft::Compare<int>(), stream));
hipStreamDestroy(stream);
delete[] in_h;
delete[] exp_h;
}
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedCOOToCSR, ::testing::ValuesIn(inputsf));
/******************************** adj graph ********************************/
template <typename index_t>
__global__ void init_adj_kernel(bool* adj, index_t num_rows, index_t num_cols, index_t divisor)
{
index_t r = blockDim.y * blockIdx.y + threadIdx.y;
index_t c = blockDim.x * blockIdx.x + threadIdx.x;
for (; r < num_rows; r += gridDim.y * blockDim.y) {
for (; c < num_cols; c += gridDim.x * blockDim.x) {
adj[r * num_cols + c] = c % divisor == 0;
}
}
}
template <typename index_t>
void init_adj(bool* adj, index_t num_rows, index_t num_cols, index_t divisor, hipStream_t stream)
{
// adj matrix: element a_ij is set to one if j is divisible by divisor.
dim3 block(32, 32);
const index_t max_y_grid_dim = 65535;
dim3 grid(num_cols / 32 + 1, (int)min(num_rows / 32 + 1, max_y_grid_dim));
hipLaunchKernelGGL(( init_adj_kernel<index_t>), dim3(grid), dim3(block), 0, stream, adj, num_rows, num_cols, divisor);
RAFT_CHECK_CUDA(stream);
}
template <typename index_t>
struct CSRAdjGraphInputs {
index_t n_rows;
index_t n_cols;
index_t divisor;
};
template <typename index_t>
class CSRAdjGraphTest : public ::testing::TestWithParam<CSRAdjGraphInputs<index_t>> {
public:
CSRAdjGraphTest()
: stream(resource::get_cuda_stream(handle)),
params(::testing::TestWithParam<CSRAdjGraphInputs<index_t>>::GetParam()),
adj(params.n_rows * params.n_cols, stream),
row_ind(params.n_rows, stream),
row_counters(params.n_rows, stream),
col_ind(params.n_rows * params.n_cols, stream),
row_ind_host(params.n_rows)
{
}
protected:
void SetUp() override
{
// Initialize adj matrix: element a_ij equals one if j is divisible by
// params.divisor.
init_adj(adj.data(), params.n_rows, params.n_cols, params.divisor, stream);
// Initialize row_ind
for (size_t i = 0; i < row_ind_host.size(); ++i) {
size_t nnz_per_row = raft::ceildiv(params.n_cols, params.divisor);
row_ind_host[i] = nnz_per_row * i;
}
raft::update_device(row_ind.data(), row_ind_host.data(), row_ind.size(), stream);
// Initialize result to 1, so we can catch any errors.
RAFT_CUDA_TRY(hipMemsetAsync(col_ind.data(), 1, col_ind.size() * sizeof(index_t), stream));
}
void Run()
{
convert::adj_to_csr<index_t>(handle,
adj.data(),
row_ind.data(),
params.n_rows,
params.n_cols,
row_counters.data(),
col_ind.data());
std::vector<index_t> col_ind_host(col_ind.size());
raft::update_host(col_ind_host.data(), col_ind.data(), col_ind.size(), stream);
std::vector<index_t> row_counters_host(params.n_rows);
raft::update_host(row_counters_host.data(), row_counters.data(), row_counters.size(), stream);
RAFT_CUDA_TRY(hipStreamSynchronize(stream));
// 1. Check that each row contains enough values
index_t nnz_per_row = raft::ceildiv(params.n_cols, params.divisor);
for (index_t i = 0; i < params.n_rows; ++i) {
ASSERT_EQ(row_counters_host[i], nnz_per_row) << "where i = " << i;
}
// 2. Check that all column indices are divisble by divisor
for (index_t i = 0; i < params.n_rows; ++i) {
index_t row_base = row_ind_host[i];
for (index_t j = 0; j < nnz_per_row; ++j) {
ASSERT_EQ(0, col_ind_host[row_base + j] % params.divisor);
}
}
}
protected:
raft::resources handle;
hipStream_t stream;
CSRAdjGraphInputs<index_t> params;
rmm::device_uvector<bool> adj;
rmm::device_uvector<index_t> row_ind;
rmm::device_uvector<index_t> row_counters;
rmm::device_uvector<index_t> col_ind;
std::vector<index_t> row_ind_host;
};
using CSRAdjGraphTestI = CSRAdjGraphTest<int>;
TEST_P(CSRAdjGraphTestI, Result) { Run(); }
using CSRAdjGraphTestL = CSRAdjGraphTest<int64_t>;
TEST_P(CSRAdjGraphTestL, Result) { Run(); }
const std::vector<CSRAdjGraphInputs<int>> csradjgraph_inputs_i = {{10, 10, 2}};
const std::vector<CSRAdjGraphInputs<int64_t>> csradjgraph_inputs_l = {
{0, 0, 2},
{10, 10, 2},
{64 * 1024 + 10, 2, 3}, // 64K + 10 is slightly over maximum of blockDim.y
{16, 16, 3}, // No peeling-remainder
{17, 16, 3}, // Check peeling-remainder
{18, 16, 3}, // Check peeling-remainder
{32 + 9, 33, 2}, // Check peeling-remainder
};
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest,
CSRAdjGraphTestI,
::testing::ValuesIn(csradjgraph_inputs_i));
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest,
CSRAdjGraphTestL,
::testing::ValuesIn(csradjgraph_inputs_l));
} // namespace sparse
} // namespace raft
| 4749d4cd3712de816811f6adaaf61f5e795074cb.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <iostream>
namespace raft {
namespace sparse {
/**************************** sorted COO to CSR ****************************/
template <typename T>
struct SparseConvertCSRInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SparseConvertCSRInputs<T>& dims)
{
return os;
}
template <typename T>
class SparseConvertCSRTest : public ::testing::TestWithParam<SparseConvertCSRInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseConvertCSRInputs<T> params;
};
const std::vector<SparseConvertCSRInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef SparseConvertCSRTest<float> SortedCOOToCSR;
TEST_P(SortedCOOToCSR, Result)
{
cudaStream_t stream;
cudaStreamCreate(&stream);
int nnz = 8;
int* in_h = new int[nnz]{0, 0, 1, 1, 2, 2, 3, 3};
int* exp_h = new int[4]{0, 2, 4, 6};
rmm::device_uvector<int> in(nnz, stream);
rmm::device_uvector<int> exp(4, stream);
rmm::device_uvector<int> out(4, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(in.data(), 0, in.size() * sizeof(int), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(exp.data(), 0, exp.size() * sizeof(int), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(out.data(), 0, out.size() * sizeof(int), stream));
raft::update_device(in.data(), in_h, nnz, stream);
raft::update_device(exp.data(), exp_h, 4, stream);
convert::sorted_coo_to_csr<int>(in.data(), nnz, out.data(), 4, stream);
ASSERT_TRUE(raft::devArrMatch<int>(out.data(), exp.data(), 4, raft::Compare<int>(), stream));
cudaStreamDestroy(stream);
delete[] in_h;
delete[] exp_h;
}
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedCOOToCSR, ::testing::ValuesIn(inputsf));
/******************************** adj graph ********************************/
template <typename index_t>
__global__ void init_adj_kernel(bool* adj, index_t num_rows, index_t num_cols, index_t divisor)
{
index_t r = blockDim.y * blockIdx.y + threadIdx.y;
index_t c = blockDim.x * blockIdx.x + threadIdx.x;
for (; r < num_rows; r += gridDim.y * blockDim.y) {
for (; c < num_cols; c += gridDim.x * blockDim.x) {
adj[r * num_cols + c] = c % divisor == 0;
}
}
}
template <typename index_t>
void init_adj(bool* adj, index_t num_rows, index_t num_cols, index_t divisor, cudaStream_t stream)
{
// adj matrix: element a_ij is set to one if j is divisible by divisor.
dim3 block(32, 32);
const index_t max_y_grid_dim = 65535;
dim3 grid(num_cols / 32 + 1, (int)min(num_rows / 32 + 1, max_y_grid_dim));
init_adj_kernel<index_t><<<grid, block, 0, stream>>>(adj, num_rows, num_cols, divisor);
RAFT_CHECK_CUDA(stream);
}
template <typename index_t>
struct CSRAdjGraphInputs {
index_t n_rows;
index_t n_cols;
index_t divisor;
};
template <typename index_t>
class CSRAdjGraphTest : public ::testing::TestWithParam<CSRAdjGraphInputs<index_t>> {
public:
CSRAdjGraphTest()
: stream(resource::get_cuda_stream(handle)),
params(::testing::TestWithParam<CSRAdjGraphInputs<index_t>>::GetParam()),
adj(params.n_rows * params.n_cols, stream),
row_ind(params.n_rows, stream),
row_counters(params.n_rows, stream),
col_ind(params.n_rows * params.n_cols, stream),
row_ind_host(params.n_rows)
{
}
protected:
void SetUp() override
{
// Initialize adj matrix: element a_ij equals one if j is divisible by
// params.divisor.
init_adj(adj.data(), params.n_rows, params.n_cols, params.divisor, stream);
// Initialize row_ind
for (size_t i = 0; i < row_ind_host.size(); ++i) {
size_t nnz_per_row = raft::ceildiv(params.n_cols, params.divisor);
row_ind_host[i] = nnz_per_row * i;
}
raft::update_device(row_ind.data(), row_ind_host.data(), row_ind.size(), stream);
// Initialize result to 1, so we can catch any errors.
RAFT_CUDA_TRY(cudaMemsetAsync(col_ind.data(), 1, col_ind.size() * sizeof(index_t), stream));
}
void Run()
{
convert::adj_to_csr<index_t>(handle,
adj.data(),
row_ind.data(),
params.n_rows,
params.n_cols,
row_counters.data(),
col_ind.data());
std::vector<index_t> col_ind_host(col_ind.size());
raft::update_host(col_ind_host.data(), col_ind.data(), col_ind.size(), stream);
std::vector<index_t> row_counters_host(params.n_rows);
raft::update_host(row_counters_host.data(), row_counters.data(), row_counters.size(), stream);
RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
// 1. Check that each row contains enough values
index_t nnz_per_row = raft::ceildiv(params.n_cols, params.divisor);
for (index_t i = 0; i < params.n_rows; ++i) {
ASSERT_EQ(row_counters_host[i], nnz_per_row) << "where i = " << i;
}
// 2. Check that all column indices are divisble by divisor
for (index_t i = 0; i < params.n_rows; ++i) {
index_t row_base = row_ind_host[i];
for (index_t j = 0; j < nnz_per_row; ++j) {
ASSERT_EQ(0, col_ind_host[row_base + j] % params.divisor);
}
}
}
protected:
raft::resources handle;
cudaStream_t stream;
CSRAdjGraphInputs<index_t> params;
rmm::device_uvector<bool> adj;
rmm::device_uvector<index_t> row_ind;
rmm::device_uvector<index_t> row_counters;
rmm::device_uvector<index_t> col_ind;
std::vector<index_t> row_ind_host;
};
using CSRAdjGraphTestI = CSRAdjGraphTest<int>;
TEST_P(CSRAdjGraphTestI, Result) { Run(); }
using CSRAdjGraphTestL = CSRAdjGraphTest<int64_t>;
TEST_P(CSRAdjGraphTestL, Result) { Run(); }
const std::vector<CSRAdjGraphInputs<int>> csradjgraph_inputs_i = {{10, 10, 2}};
const std::vector<CSRAdjGraphInputs<int64_t>> csradjgraph_inputs_l = {
{0, 0, 2},
{10, 10, 2},
{64 * 1024 + 10, 2, 3}, // 64K + 10 is slightly over maximum of blockDim.y
{16, 16, 3}, // No peeling-remainder
{17, 16, 3}, // Check peeling-remainder
{18, 16, 3}, // Check peeling-remainder
{32 + 9, 33, 2}, // Check peeling-remainder
};
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest,
CSRAdjGraphTestI,
::testing::ValuesIn(csradjgraph_inputs_i));
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest,
CSRAdjGraphTestL,
::testing::ValuesIn(csradjgraph_inputs_l));
} // namespace sparse
} // namespace raft
|
b6adcb0826909dda61a82a8f87755d79faf48928.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaGetriBatched<T>: batched out-of-place matrix inverse from LU factors.
// dA_array/dinvA_array are device arrays of 'batchsize' matrix pointers;
// per-matrix status codes go to info_array. Runs on the given MAGMA queue
// (no stream-sync guard; ordering is handled by the queue).
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskySolve<T>: solves A X = B given the Cholesky factorization of A
// in dA (MAGMA [sdcz]potrs_gpu). dB holds B on entry and X on exit; 'uplo'
// selects which triangle of dA holds the factor. Status goes to *info.
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskySolveBatched<T>: batched potrs. Unlike the single-matrix
// variant, MAGMA's batched potrs returns a single status value, which is
// stored into the 'info' out-parameter (a reference, not a per-matrix array).
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholesky<T>: in-place Cholesky factorization of an n-by-n matrix
// (MAGMA [sdcz]potrf_gpu). 'uplo' selects the triangle to factor; the MAGMA
// status (e.g. non-positive-definite leading minor index) goes to *info.
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskyBatched<T>: batched in-place Cholesky factorization; one
// status code per matrix is written to info_array.
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
// magmaTriangularSolveBatched<T>: batched triangular solve (trsm) with the
// triangular factor always applied from the left (MagmaLeft) and alpha fixed
// to 1, i.e. it solves op(A) X = B in-place in dB_array.
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
// Complex variants need an explicit alpha = 1 + 0i of the MAGMA complex type.
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
// magmaGeqrfOptimalBlocksize<T>: returns MAGMA's recommended blocking factor
// (nb) for an m-by-n QR factorization; used to size the dT workspace.
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}

template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}

template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}

template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
// magmaGeqrf<T>: QR factorization of an m-by-n matrix. When is_v2 is false the
// blocked geqrf_gpu variant is used and the T matrices are stored in dT
// (needed later by orgqr/ungqr); when is_v2 is true, geqrf2_gpu is used and
// dT is ignored. Householder scalars are written to 'tau'.
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}

template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}

template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
// magmaOrgqr<T>: materializes the m-by-n orthogonal (real) / unitary (complex)
// matrix Q from the Householder reflectors produced by magmaGeqrf. 'k' is the
// number of reflectors, 'dT' the T-factor workspace from geqrf, 'nb' the
// blocking factor used by geqrf. Complex variants map to [cz]ungqr_gpu.
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}

template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaSyevd<T, V>: symmetric (real) / Hermitian (complex) eigendecomposition
// via divide-and-conquer. Eigenvalues are written to 'w' (always real);
// eigenvectors overwrite dA when jobz requests them. The rwork/lrwork
// parameters exist only for the complex variants ([cz]heevd) and are
// explicitly unused in the real ones.
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaEig<T, V>: general (non-symmetric) eigendecomposition via [sdcz]geev.
// Note: these take HOST pointers (A, w, VL, VR, work), not device pointers.
// Real variants split 'w' into wr = w and wi = w + n, so the caller must
// provide at least 2n elements in 'w'; 'rwork' is only used by the complex
// variants.
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants to separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaSvd<T, V>: singular value decomposition via divide-and-conquer
// ([sdcz]gesdd). Singular values go to 's' (always real); U/VT receive the
// singular vectors according to 'jobz'. 'rwork' is only needed for the
// complex variants and is explicitly ignored in the real ones.
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuSolve<T>: solves A X = B using a precomputed LU factorization
// (dA, ipiv) via [sdcz]getrs_gpu with MagmaNoTrans. dB holds B on entry and
// the solution on exit.
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuSolveBatched<T>: batched getrs (MagmaNoTrans). MAGMA's batched getrs
// returns a single status value, stored into the 'info' reference.
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
// magmaGels<T>: least-squares solve of an overdetermined system via QR
// ([sdcz]gels_gpu). dB holds the right-hand sides on entry and the solution
// on exit; 'hwork' is a HOST workspace of 'lwork' elements.
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
#endif
// Allocates 'size' elements of 'type' in pinned host memory (via pin_memory)
// and binds the raw pointer to 'name'. The owning storage_<name> object lives
// in the enclosing scope, so the buffer stays valid until scope exit.
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Solves the system of linear equations A X = B using MAGMA's LU-based solvers.
On entry 'b' holds the right-hand sides and 'A' the coefficient matrices; on
exit 'b' holds the solution and 'A' the LU factorization. Both tensors are
expected in (batched) column-major layout — see _solve_helper_cuda.
Per-matrix MAGMA status codes are written to 'infos_out'.
*/
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // Use std::max explicitly, consistent with the rest of this file; the
  // previous unqualified ::max relied on an implementation-provided global
  // overload pulled in by the CUDA/HIP headers.
  magma_int_t lda = std::max<magma_int_t>(1, n);

  if (b.dim() == 2) {
    // Single matrix: use the non-batched solver.
    auto ipiv = at::empty({n}, at::kInt);
    // magmaSolve requires infos tensor to live on CPU
    Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
    magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
                        b_data, lda, infos.data_ptr<magma_int_t>());
    infos_out.copy_(infos);
  } else {
    auto infos_data = infos_out.data_ptr<magma_int_t>();
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }

    // The batched LU step may leave pivot values unwritten for singular
    // matrices. Pre-fill to avoid invalid reads inside MAGMA kernels —
    // same mitigation as in apply_batched_inverse (gh-51930).
    std::fill_n(ipiv_data, batch_size * n, 1);

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &infos_data[mini_idx];

      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
          info_array_cur, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
          &infos_data[mini_idx], batch_size % batch_limit, magma_queue);
    }
  }
#endif
}
// CUDA entry point for at::solve: clones both inputs into column-major layout
// (as apply_solve requires), dispatches on dtype, then validates the MAGMA
// info codes — per-matrix for batched input, single code otherwise.
// Returns (solution, LU factorization of A).
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
  // 'result' and 'input' should be in column major order (it should be checked before calling this function)
  // the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
  // 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
  // 'input' should contain data of original 'input' tensor (left-hand-side of the linear system)
  // NOTE: dispatch name fixed from "linalg_solve_out_cpu" — this is the CUDA
  // helper, so unsupported-dtype errors must reference the CUDA path.
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cuda", [&]{
    apply_solve<scalar_t>(result, input, infos);
  });
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
// Batched matrix inverse: LU-factorize all matrices in 'self' at once, then
// invert out-of-place into 'self_inv' in chunks of at most 65535 matrices.
// Per-matrix status codes go to infos_lu / infos_getri respectively.
// See the comment block above for details on the infos tensors.
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
// Host-side (pinned) pointer arrays passed to the batched MAGMA calls.
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
// One LU factorization over the full batch; the getri step below is chunked.
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
// Computes the inverse of a single n-by-n matrix 'self' in-place using MAGMA:
// an LU factorization (magmaLu) followed by the inversion step (magmaGetri).
// 'self' is expected to hold column-major matrix data on the GPU.
// 'infos_lu' / 'infos_getri' receive the status codes of the two phases;
// they are moved to the CPU first because the non-batched MAGMA routines
// write their 'info' result through a host pointer.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// keep lda >= 1 so degenerate (0-sized) matrices don't trip argument checks
magma_int_t lda = std::max<magma_int_t>(1, n);
// workspace size recommended by MAGMA for getri
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri requires infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
// pivot indices (int tensor) and device-side workspace for getri
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
// Computes the inverse of 'self' through the MAGMA path.
// Batched inputs (dim > 2) use the batched LU + getri kernels; a single
// matrix uses the non-batched routines (whose infos must live on the CPU).
// Returns a fresh column-major tensor holding the result.
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  auto result = cloneBatchedColumnMajor(self);
  if (self.dim() <= 2) {
    // magmaLu and magmaGetri require the infos tensors to live on the CPU
    auto lu_infos = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    auto getri_infos = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(result, lu_infos, getri_infos);
    });
    singleCheckErrors(lu_infos.item().toInt(), "inverse_cuda");
    singleCheckErrors(getri_infos.item().toInt(), "inverse_cuda");
  } else {
    const int64_t infos_len = std::max<int64_t>(1, batchCount(self));
    auto lu_infos = at::zeros({infos_len}, self.options().dtype(kInt));
    auto getri_infos = at::zeros({infos_len}, self.options().dtype(kInt));
    // the batched LU consumes a scratch copy; the inverse lands in 'result'
    auto lu_input = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(lu_input, result, lu_infos, getri_infos);
    });
    batchCheckErrors(lu_infos, "inverse_cuda");
    batchCheckErrors(getri_infos, "inverse_cuda");
  }
  return result;
}
// Backend dispatch for matrix inverse: prefers the cuSOLVER/cuBLAS path for
// 2-D inputs, very small batches, or when MAGMA is unavailable at runtime;
// otherwise uses the MAGMA (legacy) implementation.
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
  const bool prefer_lib =
      (self.dim() == 2) || (batchCount(self) <= 2) || !use_magma_;
  return prefer_lib
      ? _inverse_helper_cuda_lib(self)      // cusolver or cublas
      : _inverse_helper_cuda_legacy(self);  // magma-cuda
#else
  return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// Type-dispatching helper for 'apply_batched_inverse' / 'apply_single_inverse'.
// 'result' must be in column-major order and contain the matrices to invert;
// the inverse is computed in-place into 'result'.
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
  const bool batched = result.dim() > 2;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
    if (batched) {
      // the batched LU needs a scratch copy of the input matrices
      auto lu_input = cloneBatchedColumnMajor(result);
      apply_batched_inverse<scalar_t>(lu_input, result, infos_lu, infos_getri);
    } else {
      apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
    }
  });
  return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function.
// Calculates the inverse matrix in-place; 'result' should be in column-major
// order and contain the matrices to invert. Routes 2-D inputs, tiny batches,
// or MAGMA-less builds to cuSOLVER/cuBLAS, everything else to MAGMA.
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
#ifdef USE_CUSOLVER
  if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
    return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
  } else {
    return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
  }
#else
  return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
  // NOTE: a trailing 'return result;' used to follow here; it was unreachable
  // because both preprocessor branches above return, so it has been removed.
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B in-place (the solution overwrites 'b') given the Cholesky
// factorization of a Hermitian positive-definite matrix in 'A' ('upper'
// selects which triangular factor is stored). A single matrix goes through
// magmaCholeskySolve; batched input goes through magmaCholeskySolveBatched in
// chunks of at most 65535 matrices. 'info' receives MAGMA's status code
// (0 on success, -i if the i-th argument had an illegal value).
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
// keep lda >= 1 so 0-sized matrices don't trip LAPACK-style argument checks
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
// single matrix: direct (non-batched) solve
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// per-matrix device pointer arrays consumed by the batched MAGMA call
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
// stop at the first failing mini-batch; further work would be wasted
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
// MAGMA-backed cholesky_solve: works on column-major copies so the caller's
// tensors are untouched, and returns the solution (the modified copy of 'self').
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
  int64_t info = 0;
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto factor_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs_copy, factor_copy, upper, info);
  });
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return rhs_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
  // use MAGMA only for genuinely batched input when it is available
  if (batchCount(self) != 1 && use_magma_) {
    return _cholesky_solve_helper_cuda_magma(self, A, upper);
  }
  return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
#else
  return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the Cholesky factorization of 'self' in-place ('upper' selects the
// triangular factor). A single matrix uses magmaCholesky; batched input uses
// magmaCholeskyBatched in chunks bounded by a kernel-configuration limit.
// Per-matrix status codes are written into 'infos' (0 on success).
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// keep lda >= 1 so 0-sized matrices don't trip argument checks
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// single-matrix (non-batched) path
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// per-matrix status codes and device pointer array for the batched call
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
// Compute as many batches of 262140 possible
// 262140 is the size of the largest batch of matrices that can be run with
// violating maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
// propagate per-matrix MAGMA status codes to the caller
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
// MAGMA-backed Cholesky. Always computes the lower-triangular factorization
// (apply_cholesky is called with upper=false); the 'upper' result is obtained
// by transposing the input going in and the result coming out.
Tensor _cholesky_helper_cuda_magma(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor result;
if (self.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the #cloneBatchedColumnMajor function however it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
const Tensor input = upper ? self : self.transpose(-1, -2);
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).copy_(input).transpose_(-1, -2);
} else {
result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
self.scalar_type(), "cholesky_cuda", [&] {
// upper=false: the transposes above/below account for the 'upper' flag
apply_cholesky<scalar_t>(result, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
// undo the transpose for the upper-triangular case
return upper ? result.transpose_(-1, -2) : result;
}
// Todo: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
#ifdef USE_CUSOLVER
  const bool single_or_no_magma = (batchCount(self) == 1) || !use_magma_;
  return single_or_no_magma
      ? _cholesky_helper_cuda_cusolver(self, upper)
      : _cholesky_helper_cuda_magma(self, upper);
#else
  return _cholesky_helper_cuda_magma(self, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
// Computes the inverse in-place by solving A X = I via apply_cholesky_solve:
// 'input' holds the Cholesky factors on entry and is overwritten with an
// identity right-hand side, which the solve then turns into the inverse.
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place we need to clone it and replace with a diagonal matrix
// for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// 'input' tensor has to be a batch of diagonal matrix
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
// broadcast the single MAGMA status code to every entry of 'infos'
infos.fill_(info_tmp);
#endif
}
// Type-dispatching wrapper around 'apply_cholesky_inverse'; the inverse is
// computed in-place into 'result', which is also returned.
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      result.scalar_type(), "cholesky_inverse_out_cuda", [&] {
        apply_cholesky_inverse<scalar_t>(result, infos, upper);
      });
  return result;
}
// Backend dispatch for cholesky_inverse. The inverse is calculated in-place:
// 'result' should be in column-major order and contain the matrices to invert;
// its content is overwritten by 'apply_cholesky_inverse'.
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
#ifdef USE_CUSOLVER
  // use MAGMA only for genuinely batched input when it is available
  if (batchCount(result) != 1 && use_magma_) {
    return cholesky_inverse_kernel_impl_magma(result, infos, upper);
  }
  return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
#else
  return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the LU factorization of 'self' in-place using MAGMA.
// With get_pivots=true the partially-pivoted factorization is computed and the
// pivot indices are written into 'pivots'; otherwise the no-pivot variant is
// used. Per-matrix status codes are written into 'infos'.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
// number of pivots. Use std::min explicitly (like std::max elsewhere in this
// file) instead of relying on an unqualified ::min only transitively declared
// by the CUDA/HIP runtime headers.
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// per-matrix device pointer array for the batched MAGMA calls
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
// Entry point for torch.lu on CUDA: returns (LU, pivots, infos).
// Pivots default to the identity permutation (1..k) so that the no-pivot
// path still yields a valid pivot tensor. When check_errors is true, the
// infos are validated (singular matrices are allowed).
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  TORCH_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  auto m = self.size(-2);
  auto n = self.size(-1);
  // number of pivots. Use std::min explicitly (like std::max elsewhere in
  // this file) instead of relying on an unqualified ::min only transitively
  // declared by the CUDA/HIP runtime headers.
  auto k = std::min(m, n);
  auto req_size = self.sizes().vec();
  req_size.pop_back();
  req_size.back() = k;
  // identity permutation 1..k broadcast over the batch dimensions
  Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
  req_size.pop_back();
  auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
  Tensor self_working_copy;
  if (self.numel() == 0) {
    // nothing to factorize; return an empty LU of matching shape
    self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
      apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
    } else {
      batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
    }
  }
  return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the batched triangular system op(A) X = B in-place (the solution
// overwrites 'b') via magmaTriangularSolveBatched, chunked in mini-batches of
// at most 65535 matrices. 'upper' selects the stored triangle, 'transpose' /
// 'conjugate_transpose' select op(A), and 'unitriangular' marks a unit
// diagonal. No status codes are produced by this routine.
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
// conjugate-transpose takes precedence over plain transpose
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve is calling cuBLAS and it prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
// per-matrix device pointer arrays consumed by the batched MAGMA call
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
// Type-dispatching wrapper around 'apply_triangular_solve_batched'.
// 'infos' is accepted for interface compatibility with the other backends but
// is unused: the MAGMA batched solve does not report per-matrix status codes.
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  (void)infos; // unused
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      A.scalar_type(), "triangular_solve_cuda", [&] {
        apply_triangular_solve_batched<scalar_t>(
            A, B, upper, transpose, conjugate_transpose, unitriangular);
      });
}
// Backend dispatch for triangular_solve, chosen by batch size and matrix size.
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  // For batches smaller than 8 and matrix sizes larger than 64x64 a cuBLAS
  // for-loop is faster than the batched versions.
  if (batchCount(A) <= 8 && A.size(-1) >= 64) {
    triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
    return;
  }
#ifdef USE_MAGMA
  // cuBLAS batched is faster than MAGMA batched up until 512x512; beyond
  // that, MAGMA wins.
  if (A.size(-1) > 512) {
    triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
  } else {
    triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
  }
#else
  triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#endif // USE_MAGMA
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// orgqr on CUDA is served exclusively by cuSOLVER.
// An efficient batched MAGMA orgqr exists for small tau (tau.size(-1) <= 32),
// but it performs illegal memory reads on Windows; see the discussion in
// https://github.com/pytorch/pytorch/pull/51348 for the cuSOLVER-MAGMA
// comparison and the Windows failure. For reference, the MAGMA-based
// implementation lives at:
// https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau, Tensor& infos, int64_t n_columns) {
#if defined(USE_CUSOLVER)
  return orgqr_helper_cuda_lib(result, tau, infos, n_columns); // cusolver
#else
  TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
              "PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the QR decomposition using MAGMA in two phases:
//   1) ?geqrf2_gpu on 'R' to obtain a correct R factor, and
//   2) ?geqrf_gpu + ?orgqr_gpu on 'Q' to materialize the orthogonal factor
//      (skipped when compute_q is false, i.e. mode='r').
// Per-matrix status codes go into 'infos'; the function returns early on the
// first nonzero info since the caller will raise anyway.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
// number of elementary reflectors
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We require to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Entry point for torch.linalg.qr on CUDA: returns the pair (Q, R).
// 'mode' controls whether Q is computed and whether the decomposition is
// reduced or complete (parsed by _parse_qr_mode).
std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
r_working_copy = at::empty({n_columns_q, n}, self.options());
if (compute_q) {
int64_t n_rows_q = q_sizes[self.dim() - 2];
// identity matrix is the Q of an empty decomposition
q_working_copy = at::eye(n_rows_q, n_columns_q, self.options());
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
// allocate Q with the full geometry and copy 'self' into its leading columns
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
// trim Q to the requested column count and keep only the upper triangle of R
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes eigenvalues (and optionally eigenvectors) of a batch of Hermitian
// matrices in 'vectors' via MAGMA's syevd/heevd. Eigenvalues are written into
// 'values' and, when compute_eigenvectors is set, the eigenvectors overwrite
// 'vectors'. MAGMA requires 'values' and 'infos' to reside in CPU memory.
// A single workspace query (lwork/lrwork/liwork = -1) sizes the buffers for
// the whole batch, since all matrices share the same dimensions.
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
// value_t is the real type underlying scalar_t (e.g. float for complex<float>)
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
// scratch matrix required by the MAGMA driver (shared across the batch)
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
// complex input additionally needs a real-valued workspace (pinned memory)
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for Linear Algebra functions to raise an error if something goes wrong
// or input doesn't satisfy some requirement
// therefore return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
// Entry point for torch.symeig on CUDA: returns (eigenvalues, eigenvectors).
// When 'eigenvectors' is false, an empty tensor is returned in the second slot.
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
// move the CPU-computed eigenvalues back to the input's device
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Type-dispatching helper for 'apply_magma_eigh'.
// Small inputs are computed on the CPU and copied back instead.
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  // MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
  // See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
  // in addition lda is ignored breaking 0x0 inputs
  if (eigenvectors.size(-1) <= 128) {
    // transfer to CPU, compute the result there, and copy back to the GPU —
    // faster than routing through MAGMA, which does the same internally
    Tensor values_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
    if (compute_eigenvectors) {
      Tensor vectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
      at::linalg_eigh_out(values_cpu, vectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
      eigenvectors.copy_(vectors_cpu);
    } else {
      at::linalg_eigvalsh_out(values_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
    }
    eigenvalues.copy_(values_cpu);
  } else {
    // MAGMA requires the eigenvalues and infos tensors to reside on the CPU
    Tensor values_cpu = eigenvalues.to(kCPU);
    infos = infos.to(kCPU);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
        eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
          apply_magma_eigh<scalar_t>(
              values_cpu, eigenvectors, infos, upper, compute_eigenvectors);
        });
    // transfer the MAGMA-computed eigenvalues from CPU back to the GPU
    eigenvalues.copy_(values_cpu);
  }
}
// Kernel registered on 'linalg_eigh_stub': selects the backend at compile time,
// preferring cuSOLVER when PyTorch was built with it, otherwise falling back to
// the MAGMA/CPU implementation above.
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
  linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
  linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}

REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
// Computes eigenvalues (and optionally right eigenvectors) of a single square
// CPU matrix using MAGMA's hybrid geev driver. Results go into 'out_eigvals'
// and 'out_eigvecs'; the MAGMA status is written through 'info_ptr'.
// NOTE(review): when n == 0, *info_ptr is left untouched — callers must
// pre-initialize it (see eig_kernel_impl).
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
                      int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
                   "Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
  TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto self_data = self.data_ptr<scalar_t>();
  auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
  scalar_t *wr = out_eigvals_data;

  // Right eigenvector output; ldvr must stay >= 1 even when unused.
  scalar_t *vr_data = NULL;
  magma_int_t ldvr = 1;
  if (jobvr == MagmaVec)
  {
      vr_data = out_eigvecs.data_ptr<scalar_t>();
      ldvr = n;
  }

  // Complex geev additionally needs a real-valued workspace of size 2*n.
  value_t *rwork_data = nullptr;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    ALLOCATE_ARRAY(rwork_data, value_t, n*2);
  }

  if (n > 0) {
    // call magmaEig once with lwork = -1 to get the optimal size of work_data
    scalar_t wkopt;
    magma_int_t info;
    magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
    magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));

    // call it a 2nd time to do the actual work
    scalar_t *work_data = nullptr;
    ALLOCATE_ARRAY(work_data, scalar_t, lwork);
    magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
    *info_ptr = info;
  }
#endif
}
/*
 * Internal helper; like eig_cuda but:
 * 1. assume that self is a square matrix of side "n"
 * 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
 *    by the caller
 */
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
  int64_t n = self.size(-1);
  // copy self to pinned CPU memory: magmaEig is a hybrid CPU-GPU routine that
  // reads/writes host buffers
  auto self_working_copy = at::empty_strided(
      {n, n}, // square matrix
      {1, n}, // column-ordered, as magmaEig expects
      at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);

  // tensors holding the results. We use empty_strided to make them column-ordered
  auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor out_eigvals;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    out_eigvals = at::empty({n}, options);
  } else {
    // real input: geev reports eigenvalues as (real, imag) pairs, hence n x 2
    out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
  }
  auto out_eigvecs = eigenvectors
                     ? at::empty_strided({n, n}, {1, n}, options)
                     : Tensor();

  // apply_eig only writes through info when n > 0; initialize to 0 ("success")
  // so singleCheckErrors below does not read an indeterminate value for empty input.
  int64_t info = 0;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
    apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
  });
  singleCheckErrors(info, "eig_cuda");

  return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}

REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
// In-place batched non-symmetric eigendecomposition via MAGMA geev.
// 'input' is destroyed; 'values' receives eigenvalues, 'vectors' the right
// eigenvectors (when requested), 'infos' the per-matrix status codes.
// All tensors must already be on CPU (MAGMA's geev has no GPU interface).
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
                   "Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
  if (compute_eigenvectors) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
  }

  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
  magma_vec_t jobvl = MagmaNoVec;  // only right eigenvectors are computed
  magma_int_t n = magma_int_cast(input.size(-1), "n");
  auto lda = std::max<magma_int_t>(1, n);
  auto batch_size = batchCount(input);
  auto input_matrix_stride = matrixStride(input);
  auto values_stride = values.size(-1);
  auto input_data = input.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<scalar_t>();
  auto infos_data = infos.data_ptr<magma_int_t>();
  auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
  scalar_t* lvectors_data = nullptr;  // only right eigenvectors are computed
  int64_t ldvr = compute_eigenvectors ? lda : 1;
  int64_t ldvl = 1;

  // Complex geev additionally needs a real-valued workspace.
  Tensor rwork;
  value_t* rwork_data = nullptr;
  if (input.is_complex()) {
    ScalarType real_dtype = toValueType(input.scalar_type());
    rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
    rwork_data = rwork.data_ptr<value_t>();
  }

  // call magmaEig once (lwork = -1) to get the optimal size of work_data;
  // all matrices in the batch share the same dimensions, so one query suffices
  scalar_t work_query;
  magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
    lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);

  magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
  Tensor work = at::empty({lwork}, input.dtype());
  auto work_data = work.data_ptr<scalar_t>();

  for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
    scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
    scalar_t* values_working_ptr = &values_data[i * values_stride];
    scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
    int* info_working_ptr = &infos_data[i];
    magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
      lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
  }
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have GPU interface for the eigendecomposition and it forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy to have Fortran contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched SVD of 'self' via MAGMA's gesdd driver, writing into U, S, VT.
// jobchar selects the mode: 'A' = full U/VT, 'S' = thin U/VT, 'N' = values only.
// Per-matrix status codes are stored in 'infos'; the loop stops at the first
// failing matrix. All buffers are host-side (pinned) as the hybrid driver expects.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
                      char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto U_data = U.data_ptr<scalar_t>();
  auto S_data = S.data_ptr<value_t>();
  auto VT_data = VT.data_ptr<scalar_t>();
  auto self_stride = matrixStride(self);
  auto U_stride = matrixStride(U);
  auto S_stride = S.size(-1);
  auto VT_stride = matrixStride(VT);
  auto batchsize = batchCount(self);

  magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto lda = std::max<magma_int_t>(1, m);
  auto ldvt = std::max<magma_int_t>(1, n);
  auto mn = ::min(m, n);

  c10::Storage storage_rwork;
  value_t* rwork = nullptr;

  // gesdd integer workspace: 8 * min(m, n)
  magma_int_t* iwork;
  ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    // complex gesdd also needs a real workspace whose size depends on jobz/m/n
    auto lrwork = computeLRWorkDim(jobchar, m, n);
    storage_rwork = pin_memory<value_t>(lrwork);
    rwork = static_cast<value_t*>(storage_rwork.data());
  }

  magma_int_t info = 0;
  // Run once, first to get the optimum work size.
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  magma_int_t lwork = -1;
  scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
  magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
  lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
  scalar_t* work;
  ALLOCATE_ARRAY(work, scalar_t, lwork);

  for (int64_t i = 0; i < batchsize; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_stride];
    value_t* S_working_ptr = &S_data[i * S_stride];
    scalar_t* U_working_ptr = &U_data[i * U_stride];
    scalar_t* VT_working_ptr = &VT_data[i * VT_stride];

    // Compute S, U (optionally), VT (optionally)
    magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
                                S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}
// MAGMA-backed torch.svd helper: copies 'self' into pinned column-major host
// memory, runs apply_svd, then moves U/S/V back to the original device.
// Returns (U, S, V) — note V, not V^T: the transpose/conjugate adjustment
// happens at the end. When compute_uv is false, U and V are zero tensors.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
  std::vector<int64_t> infos(batchCount(self), 0);
  int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = ::min(m, n);

  char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';

  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);

  // The input matrix, U, S and VT have to reside in pinned memory.
  // Additionally, the input and U have to be in column major format.
  // _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
  // For the input matrix, this requirements are being taken care of below.
  // Specify strides
  auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
  self_col_major_strides[self.dim() - 2] = 1;
  self_col_major_strides[self.dim() - 1] = m;
  // Create strided tensor in pinned memory
  auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
                                              at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
    apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
  });

  if (self.dim() > 2) {
    batchCheckErrors(infos, "svd_cuda");
  } else {
    singleCheckErrors(infos[0], "svd_cuda");
  }

  // Move results back to the input's device, preserving their strides.
  U_working_copy = same_stride_to(U_working_copy, self.options());
  S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
  VT_working_copy = same_stride_to(VT_working_copy, self.options());

  if (!compute_uv) {
    VT_working_copy.zero_();
    U_working_copy.zero_();
  }

  if (some) {
    // thin SVD: keep only the first k rows of VT
    VT_working_copy = VT_working_copy.narrow(-2, 0, k);
  }

  // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
  // Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
  VT_working_copy = VT_working_copy.conj();
  VT_working_copy.transpose_(-2, -1);
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// Backend selector for torch.svd on CUDA: cuSOLVER when compiled in,
// otherwise the MAGMA-based legacy path.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
  return _svd_helper_cuda_lib(self, some, compute_uv);
#else
  return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves LU-factored systems A*X = B in-place in 'b' on the GPU.
// Single matrix (b.dim() == 2): magmaLuSolve with pivots fetched to CPU as
// MAGMA requires. Batched: device pointer arrays are built and the batched
// solver is invoked in chunks of at most 65535 systems (MAGMA's batch limit);
// the first non-zero status aborts further chunks. Final status goes to 'info'.
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto b_data = b.data_ptr<scalar_t>();
  auto lu_data = lu.data_ptr<scalar_t>();
  auto n = lu.size(-2);
  auto nrhs = b.size(-1);
  int info_tmp = 0;
  if (b.dim() == 2) {
    // MAGMA wants the pivot indices on the host for the non-batched solver
    Tensor pivots_tmp = pivots.cpu();
    magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
    info = info_tmp;
  } else {
    auto pivots_data = pivots.data_ptr<magma_int_t>();
    auto b_stride = matrixStride(b);
    auto lu_stride = matrixStride(lu);
    auto pivots_stride = pivots.size(-1);
    magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");

    // Per-matrix pointer arrays required by the batched MAGMA interface.
    magma_int_t** pivots_array;
    scalar_t** lu_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    for (int64_t i = 0; i < batch_size; i++) {
      pivots_array[i] = &pivots_data[i * pivots_stride];
      b_array[i] = &b_data[i * b_stride];
      lu_array[i] = &lu_data[i * lu_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** lu_array_cur = &lu_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** pivots_array_cur = &pivots_array[mini_idx];

      magmaLuSolveBatched<scalar_t>(
          n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
          info_tmp, batch_limit, magma_queue);

      if (info_tmp != 0) {
        break;
      }
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    // (mini_idx carries over from the loop above and points at the tail chunk)
    if (batch_size % batch_limit != 0 && info_tmp == 0) {
      magmaLuSolveBatched<scalar_t>(
          n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
          info_tmp, batch_size % batch_limit, magma_queue);
    }

    info = info_tmp;
  }
#endif
}
// Solves A*X = B given A's LU factorization (LU_data, LU_pivots); 'self' is B.
// Returns the solution tensor; inputs are not modified (column-major working
// copies are made for MAGMA).
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
  // Early-out for empty inputs *before* paying for the column-major clones below;
  // the original ordering cloned both tensors and then discarded the copies.
  if (self.numel() == 0 || LU_data.numel() == 0) {
    return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  }
  int64_t info = 0;
  // MAGMA expects Fortran (column-major) matrix layout and contiguous pivots.
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
  auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
    apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
  });
  TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
  return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Least-squares solve via MAGMA gels (QR-based). 'b' is overwritten in-place
// with the solution and returned; 'infos' is rebound to a CPU tensor (MAGMA
// requirement) holding per-matrix status codes.
// NOTE(review): 'rank', 'singular_values', 'cond' and 'driver_name' are unused
// on this path — presumably only the "gels" driver is supported here; confirm
// against the dispatching caller.
Tensor& _lstsq_helper_cuda(
    Tensor& b, Tensor& rank, Tensor& singular_values, Tensor& infos, const Tensor& a, double cond, std::string driver_name) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "torch.linalg.lstsq_cuda", [&] {
    auto trans = MagmaNoTrans;
    auto m = magma_int_cast(a.size(-2), "m");
    auto n = magma_int_cast(a.size(-1), "n");
    auto nrhs = magma_int_cast(b.size(-1), "nrhs");
    auto ldda = std::max<magma_int_t>(1, m);
    auto lddb = std::max<magma_int_t>(1, ::max(m, n));
    auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
    // host workspace size for magma_?gels_gpu
    auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
    Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
    auto* hwork_ptr = hwork.data_ptr<scalar_t>();

    // MAGMA requires infos tensor to live on CPU
    infos = infos.to(at::kCPU);
    auto infos_data = infos.data_ptr<magma_int_t>();

    batch_iterator_with_broadcasting<scalar_t>(a, b,
      [&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
        int64_t a_linear_batch_idx) {
        magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
        magmaGels<scalar_t>(trans, m, n, nrhs,
          a_working_ptr, ldda, b_working_ptr, lddb,
          hwork_ptr, lwork, infos_working_ptr);
      }
    );
  });
  return b;
#endif
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| b6adcb0826909dda61a82a8f87755d79faf48928.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
// magmaSolve<T> specializations: single-matrix LU solve (xgesv_gpu) on device data.
// MagmaStreamSyncGuard keeps MAGMA's work ordered w.r.t. the current CUDA stream;
// c10 complex pointers are reinterpret_cast to MAGMA's layout-compatible complex types.
template<>
void magmaSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesv_gpu(n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesv_gpu(n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSolveBatched<T> specializations: batched LU solve (xgesv_batched).
// Runs on the caller-provided MAGMA queue; per-matrix statuses land in dinfo_array.
template<>
void magmaSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_zgesv_batched(n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_cgesv_batched(n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLu<T> specializations: LU factorization with partial pivoting (xgetrf_gpu).
template<>
void magmaLu<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLu<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLu<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLu<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuBatched<T> specializations: batched pivoted LU (xgetrf_batched) on a MAGMA queue.
template<>
void magmaLuBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuNoPiv<T> specializations: LU factorization *without* pivoting (xgetrf_nopiv_gpu).
template<>
void magmaLuNoPiv<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPiv<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPiv<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPiv<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuNoPivBatched<T> specializations: batched non-pivoted LU on a MAGMA queue.
template<>
void magmaLuNoPivBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPivBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPivBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPivBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaGetriOptimalBlocksize<T>: returns MAGMA's tuned blocksize (nb) for the
// GETRI (matrix inverse) routine of the matching precision.
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
  return magma_get_dgetri_nb(n);
}

template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
  return magma_get_sgetri_nb(n);
}

template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
    magma_int_t n) {
  return magma_get_zgetri_nb(n);
}

template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
    magma_int_t n) {
  return magma_get_cgetri_nb(n);
}
// magmaGetri<T>: matrix inverse from an LU factorization (GETRI), forwarding
// to MAGMA's {d,s,z,c}getri_gpu. dA holds the LU factors on entry and the
// inverse on exit; ipiv comes from the preceding GETRF; dwork is device
// workspace of lwork elements.
template<>
void magmaGetri<double>(
    magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGetri<float>(
    magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetri<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<double>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetri_gpu(
      n,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaDoubleComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetri<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<float>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetri_gpu(
      n,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaFloatComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaGetriBatched<T>: batched out-of-place matrix inverse via MAGMA's
// {d,s,z,c}getri_outofplace_batched. Inputs are LU factors in dA_array with
// pivots in ipiv_array; inverses are written to dinvA_array.
template<>
void magmaGetriBatched<double>(
    magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGetriBatched<float>(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetriBatched<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<double>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaDoubleComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetriBatched<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<float>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaFloatComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaFloatComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholeskySolve<T>: solves A X = B given a Cholesky factorization of A
// (POTRS), forwarding to MAGMA's {d,s,z,c}potrs_gpu. dB is overwritten with
// the solution.
template<>
void magmaCholeskySolve<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolve<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholeskySolveBatched<T>: batched POTRS via MAGMA. Unlike the other
// batched wrappers here, the batched potrs routines return a single status
// value, which is stored through the `info` reference (not a per-matrix
// info array).
template<>
void magmaCholeskySolveBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolveBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholesky<T>: Cholesky factorization (POTRF) via MAGMA's
// {d,s,z,c}potrf_gpu; dA is factorized in place, uplo selects the
// upper/lower triangle.
template<>
void magmaCholesky<double>(
    magma_uplo_t uplo, magma_int_t n, double* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholesky<float>(
    magma_uplo_t uplo, magma_int_t n, float* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholesky<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholesky<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaCholeskyBatched<T>: batched POTRF via MAGMA's {d,s,z,c}potrf_batched;
// per-matrix status codes are written to info_array.
template<>
void magmaCholeskyBatched<double>(
    magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskyBatched<float>(
    magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskyBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskyBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaTriangularSolveBatched<T>: batched triangular solve via
// magmablas_{d,s,z,c}trsm_batched. The side is fixed to MagmaLeft and the
// scaling factor alpha is fixed to 1, so this solves op(A) X = B in place
// (B overwritten with X) for each matrix in the batch.
template<>
void magmaTriangularSolveBatched<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  // Complex alpha must be built explicitly: 1 + 0i.
  magmaDoubleComplex alpha({1, 0});
  magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  // Complex alpha must be built explicitly: 1 + 0i.
  magmaFloatComplex alpha({1, 0});
  magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaGeqrfOptimalBlocksize<T>: returns MAGMA's tuned blocksize (nb) for the
// GEQRF (QR factorization) routine of the matching precision.
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
  return magma_get_dgeqrf_nb(m, n);
}

template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
  return magma_get_sgeqrf_nb(m, n);
}

template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_zgeqrf_nb(m, n);
}

template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_cgeqrf_nb(m, n);
}
// magmaGeqrf<T>: QR factorization (GEQRF) via MAGMA. When is_v2 is false the
// geqrf_gpu variant is used, which additionally fills dT with the blocked
// Householder T factors (needed by the matching orgqr/ungqr); when is_v2 is
// true geqrf2_gpu is used and dT is ignored.
template<>
void magmaGeqrf<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGeqrf<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGeqrf<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_zgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        reinterpret_cast<magmaDoubleComplex*>(dT),
        info);
  } else {
    magma_zgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGeqrf<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_cgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        reinterpret_cast<magmaFloatComplex*>(dT),
        info);
  } else {
    magma_cgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaOrgqr<T>: generates the explicit Q matrix from a GEQRF factorization.
// Real types dispatch to {d,s}orgqr_gpu; complex types dispatch to the
// unitary equivalents {z,c}ungqr_gpu. tau/dT come from the matching
// magmaGeqrf call (is_v2 == false variant); nb is the blocksize used there.
template<>
void magmaOrgqr<double>(
    magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaOrgqr<float>(
    magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaOrgqr<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      reinterpret_cast<magmaDoubleComplex*>(tau),
      reinterpret_cast<magmaDoubleComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaOrgqr<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      reinterpret_cast<magmaFloatComplex*>(tau),
      reinterpret_cast<magmaFloatComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSyevd<T, value_t>: symmetric/Hermitian eigendecomposition (SYEVD /
// HEEVD) via MAGMA. The real-valued specializations ignore rwork/lrwork
// (those parameters exist only so the complex and real signatures match);
// the complex specializations forward them to {z,c}heevd_gpu, which needs a
// real workspace.
template<>
void magmaSyevd<double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
    double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSyevd<float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
    float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork;  // unused
  (void)lrwork;  // unused
  MagmaStreamSyncGuard guard;
  magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSyevd<c10::complex<double>, double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
      ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSyevd<c10::complex<float>, float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
      ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaEig<T, value_t>: general (non-symmetric) eigendecomposition via MAGMA
// GEEV. For the real specializations, MAGMA's [sd]geev returns eigenvalues as
// separate real (wr) and imaginary (wi) arrays, so the caller's `w` buffer is
// split into two halves of length n (wr = w, wi = w + n); rwork is unused.
// The complex specializations pass `w` and `rwork` directly to {z,c}geev.
template<>
void magmaEig<double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    double *A, magma_int_t lda,
    double *w,
    double *VL, magma_int_t ldvl,
    double *VR, magma_int_t ldvr,
    double *work, magma_int_t lwork,
    double *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  // magma [sd]geev wants to separate output arrays: wr and wi for the real
  // and imaginary parts
  double *wr = w;
  double *wi = w + n;
  (void)rwork; // unused
  magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaEig<float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    float *A, magma_int_t lda,
    float *w,
    float *VL, magma_int_t ldvl,
    float *VR, magma_int_t ldvr,
    float *work, magma_int_t lwork,
    float *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  // See the double specialization: w is split into wr / wi halves.
  float *wr = w;
  float *wi = w + n;
  (void)rwork; // unused
  magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaEig<c10::complex<double>, double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<double> *A, magma_int_t lda,
    c10::complex<double> *w,
    c10::complex<double> *VL, magma_int_t ldvl,
    c10::complex<double> *VR, magma_int_t ldvr,
    c10::complex<double> *work, magma_int_t lwork,
    double *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_zgeev(jobvl, jobvr, n,
         reinterpret_cast<magmaDoubleComplex*>(A), lda,
         reinterpret_cast<magmaDoubleComplex*>(w),
         reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
         reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
         reinterpret_cast<magmaDoubleComplex*>(work), lwork,
         rwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaEig<c10::complex<float>, float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
    c10::complex<float> *A, magma_int_t lda,
    c10::complex<float> *w,
    c10::complex<float> *VL, magma_int_t ldvl,
    c10::complex<float> *VR, magma_int_t ldvr,
    c10::complex<float> *work, magma_int_t lwork,
    float *rwork,
    magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_cgeev(jobvl, jobvr, n,
         reinterpret_cast<magmaFloatComplex*>(A), lda,
         reinterpret_cast<magmaFloatComplex*>(w),
         reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
         reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
         reinterpret_cast<magmaFloatComplex*>(work), lwork,
         rwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSvd<T, value_t>: singular value decomposition via MAGMA GESDD
// (divide-and-conquer). The real specializations ignore rwork (the parameter
// exists so the real and complex signatures match); the complex
// specializations forward rwork to {z,c}gesdd, which needs a real workspace.
// Singular values `s` are always real (value_t).
template<>
void magmaSvd<double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
    magma_int_t lda, double* s, double* U, magma_int_t ldu,
    double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSvd<float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
    magma_int_t lda, float* s, float* U, magma_int_t ldu,
    float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
    float* rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSvd<c10::complex<float>, float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
    magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
    c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
    float *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
                reinterpret_cast<magmaFloatComplex*>(U), ldu,
                reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
                reinterpret_cast<magmaFloatComplex*>(work), lwork,
                rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSvd<c10::complex<double>, double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
    magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
    c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
                reinterpret_cast<magmaDoubleComplex*>(U), ldu,
                reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
                reinterpret_cast<magmaDoubleComplex*>(work), lwork,
                rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuSolve<T>: solves A X = B given an LU factorization of A (GETRS) via
// MAGMA's {d,s,z,c}getrs_gpu. The transpose mode is fixed to MagmaNoTrans;
// dB is overwritten with the solution.
template<>
void magmaLuSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuSolveBatched<T>: batched GETRS via MAGMA (MagmaNoTrans fixed). Like
// the batched potrs wrappers, the batched getrs routines return a single
// status value, stored through the `info` reference.
template<>
void magmaLuSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    double** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaGels<T>: least-squares solve of an overdetermined system (GELS) via
// MAGMA's {s,d,c,z}gels_gpu. Note hwork is a *host* workspace of lwork
// elements (unlike the device workspaces used by the other wrappers here);
// dB is overwritten with the solution.
template<>
void magmaGels<float>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
    float* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgels_gpu(trans, m, n, nrhs,
      dA, ldda, dB, lddb,
      hwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGels<double>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
    double* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgels_gpu(trans, m, n, nrhs,
      dA, ldda, dB, lddb,
      hwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGels<c10::complex<float>>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
    c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgels_gpu(trans, m, n, nrhs,
      reinterpret_cast<magmaFloatComplex*>(dA), ldda,
      reinterpret_cast<magmaFloatComplex*>(dB), lddb,
      reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGels<c10::complex<double>>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
    c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgels_gpu(trans, m, n, nrhs,
      reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
      reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
      reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
#endif
// Declares `storage_<name>` backed by pin_memory<type>(size) (presumably
// page-locked host memory — the Storage keeps the allocation alive for the
// enclosing scope) and points the pre-declared pointer `name` at its data.
// Expands to two statements, so it must be used at block scope, not as the
// single statement of an unbraced if/else.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = b for each matrix in the batch, overwriting `b` with the
// solution and `A` with its LU factors. Both tensors must be batched
// column-major (MAGMA/LAPACK layout); `infos_out` receives one LAPACK status
// code per matrix. Single matrices (b.dim() == 2) use the non-batched
// magmaSolve; batched inputs use magmaSolveBatched in chunks.
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // lda >= 1 even for n == 0, as LAPACK-style routines require.
  magma_int_t lda = std::max(magma_int_t{1}, n);

  if (b.dim() == 2) {
    auto ipiv = at::empty({n}, at::kInt);
    // magmaSolve requires infos tensor to live on CPU
    Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
    magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
                        b_data, lda, infos.data_ptr<magma_int_t>());
    infos_out.copy_(infos);
  } else {
    auto infos_data = infos_out.data_ptr<magma_int_t>();
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    // Per-matrix pivot storage and pointer arrays for the batched MAGMA call,
    // allocated via ALLOCATE_ARRAY (pin_memory-backed, freed at scope exit).
    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }

    MAGMAQueue magma_queue(b.get_device());

    // Batches are submitted in chunks of at most 65535 matrices per MAGMA
    // call (presumably a MAGMA/launch limit — same constant is used by the
    // other batched drivers in this file).
    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &infos_data[mini_idx];

      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
          info_array_cur, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    // (mini_idx now points at the first unprocessed matrix).
    if (batch_size % batch_limit != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
          &infos_data[mini_idx], batch_size % batch_limit, magma_queue);
    }
  }
#endif
}
// CUDA helper for torch.solve: clones `self` (RHS) and `A` into column-major
// working copies, runs apply_solve on them, checks the returned LAPACK info
// codes, and returns (solution, LU factors of A).
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  // infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
  auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
  });
  // Batched inputs get per-matrix error reporting; a single matrix gets a
  // single scalar check.
  if (self.dim() > 2) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos.item().toInt(), "solve_cuda");
  }
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
// 'input' should contain data of original 'input' tensor (left-hand-side of the linear system)
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
  // Fix: the dispatch name previously said "linalg_solve_out_cpu" (copied
  // from the CPU helper); this is the CUDA helper, and the name appears in
  // dispatch error messages, so it should report the correct backend.
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cuda", [&]{
    apply_solve<scalar_t>(result, input, infos);
  });
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
// Batched matrix inverse via MAGMA: LU-factorize every matrix (GETRF), then
// invert from the LU factors (GETRI). 'self' is overwritten with its LU
// factorization and the inverse is written into 'self_inv'.
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// Raw pointers and per-matrix strides used to build MAGMA's pointer arrays below.
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// lda >= 1 keeps MAGMA's argument validation happy even when n == 0.
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
// Pivot storage is batch_size * lda (not n) so it is non-empty for n == 0.
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
// Phase 1: LU factorization of the whole batch in one call.
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
// Phase 2: invert from LU; GETRI is chunked because MAGMA presumably cannot
// launch more than 65535 problems at once — see the batch_limit note above.
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
// Inverts a single n-by-n matrix 'self' in-place using MAGMA's hybrid
// CPU-GPU GETRF (LU) followed by GETRI (inverse from LU).
// 'infos_lu' and 'infos_getri' receive the LAPACK-style status codes of the two steps.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
// Workspace size recommended by MAGMA for GETRI (n times the optimal block size).
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri requires infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
// Pivot indices live on CPU; sized 'lda' so the buffer is non-empty even when n == 0.
Tensor ipiv = at::empty({lda}, at::kInt);
// Device workspace for GETRI.
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
// MAGMA-backed inverse helper: batched path for dim > 2, single-matrix path otherwise.
// Returns the inverse; the input tensor is not modified.
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
// Per-matrix error codes for the LU and GETRI phases of the batched path.
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
// apply_batched_inverse overwrites its first argument with the LU factors,
// so a second working copy is needed to preserve 'self'.
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri requires infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
// The single-matrix path inverts the clone in-place.
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
// Backend dispatcher for inverse: cuSOLVER/cuBLAS for single matrices, tiny
// batches, or when MAGMA is unavailable at runtime; MAGMA otherwise.
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
  const bool prefer_lib =
      (self.dim() == 2) || (batchCount(self) <= 2) || !use_magma_;
  if (prefer_lib) {
    return _inverse_helper_cuda_lib(self);     // cusolver or cublas
  }
  return _inverse_helper_cuda_legacy(self);    // magma-cuda
#else
  return _inverse_helper_cuda_legacy(self);    // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
// Inverts 'result' in-place using MAGMA. 'infos_lu'/'infos_getri' collect the
// error codes of the factorization and inversion phases; checking them is the caller's job.
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
// The batched kernel consumes a separate LU working copy and writes the
// inverse into 'result'.
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
// Single matrix: inverted directly in-place.
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// 'infos_lu' and 'infos_getri' receive error codes of the two inversion phases.
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
#ifdef USE_CUSOLVER
  // Prefer cuSOLVER/cuBLAS for single matrices and tiny batches, or when MAGMA
  // is not available at runtime.
  if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
    return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri);  // cusolver or cublas
  } else {
    return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri);  // magma-cuda
  }
#else
  return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri);  // magma-cuda
#endif
  // Note: a trailing 'return result;' was removed — every preprocessor branch
  // above already returns, so it was unreachable.
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B where A is given by its Cholesky factor (POTRS).
// 'b' holds the right-hand sides and is overwritten with the solution;
// 'A' holds the Cholesky factor(s). 'info' receives the MAGMA status code
// (0 on success, -i if the i-th argument was illegal).
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
// lda >= 1 even for empty matrices, matching MAGMA's argument checks.
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
// Single matrix: non-batched POTRS.
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
// Batched path: build pointer arrays, one entry per matrix in the batch.
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
// The batched routine reports a single status for the whole chunk;
// stop at the first failure since later results would be wasted.
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
// MAGMA-backed cholesky_solve: solves A X = B given the Cholesky factor in 'A'.
// Works on column-major clones, leaving the caller's tensors untouched.
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
  auto rhs_working = cloneBatchedColumnMajor(self);
  auto factor_working = cloneBatchedColumnMajor(A);
  int64_t info = 0;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs_working, factor_working, upper, info);
  });
  // info == -i means the i-th MAGMA argument was illegal.
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return rhs_working;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
// Backend dispatcher: cuSOLVER for a single matrix (or when MAGMA is
// unavailable at runtime), MAGMA for real batches.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Cholesky factorization (POTRF) of 'self' in-place via MAGMA.
// 'infos' collects one LAPACK-style status code per matrix in the batch.
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// lda >= 1 keeps MAGMA's argument checks satisfied for empty matrices.
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// Single matrix: non-batched POTRF.
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
// Batched path: one pointer and one info slot per matrix.
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
// Compute as many batches of 262140 possible
// 262140 is the size of the largest batch of matrices that can be run with
// violating maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
// Copy per-matrix status codes out to the caller-provided vector.
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
// MAGMA-backed cholesky helper. Returns the Cholesky factor of 'self'
// (upper or lower triangular as requested); 'self' itself is not modified.
Tensor _cholesky_helper_cuda_magma(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor result;
if (self.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the #cloneBatchedColumnMajor function however it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
const Tensor input = upper ? self : self.transpose(-1, -2);
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).copy_(input).transpose_(-1, -2);
} else {
result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
}
// 'upper' was already handled by transposing the input above, so the MAGMA
// call always factors the lower triangle (hence the hard-coded 'false').
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
self.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
// Undo the transposition trick for the upper-triangular case.
return upper ? result.transpose_(-1, -2) : result;
}
// Todo: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
#ifdef USE_CUSOLVER
  // A single matrix (or a runtime without MAGMA) goes through cuSOLVER.
  const bool use_cusolver = (batchCount(self) == 1) || !use_magma_;
  return use_cusolver
      ? _cholesky_helper_cuda_cusolver(self, upper)
      : _cholesky_helper_cuda_magma(self, upper);
#else
  return _cholesky_helper_cuda_magma(self, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
// Computes the inverse of a Hermitian positive-definite matrix from its
// Cholesky factor by solving A X = I via the batched POTRS path.
// 'input' holds the Cholesky factor(s) on entry and the inverse on exit.
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place we need to clone it and replace with a diagonal matrix
// for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// 'input' tensor has to be a batch of diagonal matrix
// (identity right-hand sides: solving A X = I yields X = A^{-1})
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
// Broadcast the single status code to every entry of 'infos'.
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
// 'result' holds the Cholesky factor(s) on entry and is overwritten with the inverse.
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
// Backend dispatcher for cholesky_inverse.
// This function calculates the inverse matrix in-place: 'result' should be in
// column major order, contain the Cholesky factors to invert, and its content
// is overwritten by 'apply_cholesky_inverse'.
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
#ifdef USE_CUSOLVER
  // MAGMA handles real batches; everything else goes through cuSOLVER.
  if (batchCount(result) != 1 && use_magma_) {
    return cholesky_inverse_kernel_impl_magma(result, infos, upper);
  }
  return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
#else
  return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// LU factorization of 'self' in-place via MAGMA (GETRF), with or without
// partial pivoting. 'pivots' receives the pivot indices when get_pivots is
// true; 'infos' receives LAPACK-style status codes.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
// k = min(m, n): the number of pivots for a rectangular factorization.
magma_int_t k = std::min(m, n);
// NOTE(review): 'm' is passed directly as the leading dimension (not
// max(1, m) as elsewhere in this file); presumably m > 0 is guaranteed by
// the caller's empty-input check — confirm.
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
// Batched path: build one pointer per matrix in the batch.
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
// Computes the partially-pivoted LU decomposition of 'self' on CUDA.
// Returns (LU factors, pivots, infos); when 'pivot' is false no pivoting is
// performed and 'pivots' stays at its identity initialization.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
// Build the pivots shape: batch dims + [k].
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
// Initialize pivots to the identity permutation (1..k, LAPACK 1-based),
// so the no-pivot path returns a valid permutation.
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
// infos shape: just the batch dims.
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
// Nothing to factor; keep the empty shape.
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched triangular solve (TRSM) via MAGMA: solves op(A) X = b in-place,
// where op is identity / transpose / conjugate-transpose and A is triangular
// (optionally unit-diagonal). 'b' is overwritten with the solution.
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// Translate the boolean flags to MAGMA enums; conjugate_transpose wins over transpose.
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve is calling cuBLAS and it prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
// Type-dispatching wrapper for 'apply_triangular_solve_batched'.
// 'infos' is accepted only to match the triangular_solve kernel signature;
// the MAGMA path produces no per-matrix status codes, so it is ignored.
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
// Entry point for triangular_solve on CUDA: chooses between a cuBLAS
// per-matrix loop, cuBLAS batched, and MAGMA batched back-ends based on
// batch count and matrix size heuristics.
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batches smaller than 8 and matrix sizes larger than 64x64 cuBLAS forloop is faster than batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the explicit Q matrix from the Householder reflectors produced by
// geqrf. cuSOLVER-only on CUDA; errors out when built without cuSOLVER.
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau, Tensor& infos, int64_t n_columns) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cuda_lib(result, tau, infos, n_columns); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// QR decomposition via MAGMA. Two passes over the data are required because of
// a MAGMA quirk (see the comment mid-function): R is computed from 'R' with
// ?geqrf2_gpu and, if compute_q, Q is computed from 'Q' with ?geqrf_gpu + ?orgqr_gpu.
// 'infos' collects per-matrix status codes; the function returns early on the
// first non-zero status.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
// k = min(m, n): the number of elementary reflectors.
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We require to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
// Materialize Q from the reflectors left in q_working_ptr / tau.
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// QR helper for CUDA: prepares working buffers with the geometry implied by
// 'mode' (reduced/complete, with or without Q), runs apply_qr and trims the
// outputs. Returns (Q, R); Q is an empty tensor for mode='r'.
std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
r_working_copy = at::empty({n_columns_q, n}, self.options());
if (compute_q) {
int64_t n_rows_q = q_sizes[self.dim() - 2];
// Q of an empty input is the identity of the requested shape.
q_working_copy = at::eye(n_rows_q, n_columns_q, self.options());
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
// Q working buffer is wider than the input; copy the input into its
// leading columns, the rest is filled by orgqr inside apply_qr.
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
// Keep only the upper-triangular part of the raw R.
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Symmetric/Hermitian eigendecomposition via MAGMA's SYEVD/HEEVD.
// 'vectors' holds the input matrices and is overwritten (with eigenvectors
// when compute_eigenvectors is true); 'values' receives the eigenvalues.
// 'values' and 'infos' must live on CPU (asserted below) because the hybrid
// MAGMA driver writes them host-side. Returns early on the first failure.
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
// value_t is the real type underlying scalar_t (e.g. float for complex<float>).
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
// Scratch matrix required by the MAGMA driver, reused across the batch loop.
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
// (lwork/liwork/lrwork == -1 is the LAPACK workspace-query convention).
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
// Complex input additionally needs a real-valued workspace, kept alive by
// pinned-memory storage for the duration of this call.
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for Linear Algebra functions to raise an error if something goes wrong
// or input doesn't satisfy some requirement
// therefore return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
// Computes eigenvalues (and optionally eigenvectors) of a batch of
// symmetric/Hermitian CUDA matrices via MAGMA's hybrid CPU-GPU syevd driver.
// 'upper' selects which triangle of 'self' is read; 'eigenvectors' requests
// the eigenvector matrix. Returns (eigenvalues, eigenvectors-or-empty).
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
  // MAGMA writes per-matrix status codes; it requires them on the CPU.
  Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
  auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1);  // self.shape[:-1]
  ScalarType real_dtype = toValueType(self.scalar_type());
  // magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
  // The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of
  // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
  auto eigvals_working_copy = self.numel() == 0
      ? at::empty(eigvals_shape, self.options().dtype(real_dtype))
      : at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
  if (self.numel() == 0) {
    // Empty input: nothing to compute, return empty results immediately.
    return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
  }
  // MAGMA expects Fortran (column-major) layout; clone accordingly.
  auto self_working_copy = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
    apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
  });
  // Raise on any nonzero MAGMA info code (batched vs. single error reporting).
  if (self.dim() > 2) {
    batchCheckErrors(infos, "symeig_cuda");
  } else {
    singleCheckErrors(infos.item().toInt(), "symeig_cuda");
  }
  if (eigenvectors) {
    return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
  } else {
    return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
  }
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs the result is computed on the CPU instead.
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  // MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
  // See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
  // in addition lda is ignored breaking 0x0 inputs
  if (eigenvectors.size(-1) > 128) {
    // MAGMA requires eigenvalues and infos tensors to reside on CPU
    Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
    infos = infos.to(kCPU);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
        // 'eigenvectors' doubles as the input matrix and is overwritten in place.
        apply_magma_eigh<scalar_t>(
            eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
      });
    // Transfer results computed by MAGMA from CPU back to GPU
    eigenvalues.copy_(eigenvalues_cpu);
  } else { // eigenvectors.size(-1) <= 128
    // transfer to CPU, compute the result there and copy back to GPU;
    // this is faster than going through MAGMA, which does the same internally
    Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
    if (compute_eigenvectors) {
      Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
      at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
      eigenvectors.copy_(eigenvectors_cpu);
    } else {
      at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
    }
    eigenvalues.copy_(eigenvalues_cpu);
  }
}
// Entry point for the linalg_eigh dispatch stub: use cuSOLVER when PyTorch was
// built with it, otherwise fall back to the MAGMA/CPU path above.
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
  linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
  linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and returns CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
//
// Computes eigenvalues (and optionally right eigenvectors) of a single square
// CPU-resident matrix 'self' via MAGMA's geev. The MAGMA status code is
// written through 'info_ptr'.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
                      int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
  TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto self_data = self.data_ptr<scalar_t>();
  auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
  scalar_t *wr = out_eigvals_data;
  // Right eigenvectors are only produced on request; ldvr must still be >= 1.
  scalar_t *vr_data = NULL;
  magma_int_t ldvr = 1;
  if (jobvr == MagmaVec)
  {
    vr_data = out_eigvecs.data_ptr<scalar_t>();
    ldvr = n;
  }
  // Complex geev additionally needs a real workspace of size 2*n.
  value_t *rwork_data = nullptr;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    ALLOCATE_ARRAY(rwork_data, value_t, n*2);
  }
  if (n > 0) {
    // call magmaEig once (lwork = -1) to get the optimal size of work_data
    scalar_t wkopt;
    magma_int_t info;
    magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
    magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
    // call it a 2nd time to do the actual work
    scalar_t *work_data = nullptr;
    ALLOCATE_ARRAY(work_data, scalar_t, lwork);
    magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
    *info_ptr = info;
  }
#endif
}
/*
 * Internal helper; like eig_cuda but:
 * 1. assume that self is a square matrix of side "n"
 * 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
 *    by the caller
 */
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
  int64_t n = self.size(-1);
  // copy self to pinned CPU memory
  auto self_working_copy = at::empty_strided(
      {n, n}, // square matrix
      {1, n}, // column-ordered, as magmaEig expects
      at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);
  // tensors holding the results. We use empty_strided to make them column-ordered
  auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor out_eigvals;
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    // complex dtype: eigenvalues are returned directly as n complex numbers
    out_eigvals = at::empty({n}, options);
  } else {
    // real dtype: geev returns (real, imag) pairs, hence the (n, 2) shape
    out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
  }
  auto out_eigvecs = eigenvectors
      ? at::empty_strided({n, n}, {1, n}, options)
      : Tensor();
  int64_t info;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
    apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
  });
  singleCheckErrors(info, "eig_cuda");
  return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
  This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
  'infos' is an int Tensor containing error codes for each matrix in the batched input.
  For more information see MAGMA's documentation for GEEV routine.
  All tensors must already reside on the CPU (MAGMA's geev is a hybrid
  CPU-GPU routine with a CPU interface).
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
  if (compute_eigenvectors) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
  }
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
  magma_vec_t jobvl = MagmaNoVec;  // only right eigenvectors are computed
  magma_int_t n = magma_int_cast(input.size(-1), "n");
  auto lda = std::max<magma_int_t>(1, n);
  auto batch_size = batchCount(input);
  auto input_matrix_stride = matrixStride(input);
  auto values_stride = values.size(-1);
  auto input_data = input.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<scalar_t>();
  auto infos_data = infos.data_ptr<magma_int_t>();
  auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
  scalar_t* lvectors_data = nullptr;  // only right eigenvectors are computed
  int64_t ldvr = compute_eigenvectors ? lda : 1;
  int64_t ldvl = 1;
  // Complex geev needs a real-valued scratch buffer of size 2*n.
  Tensor rwork;
  value_t* rwork_data = nullptr;
  if (input.is_complex()) {
    ScalarType real_dtype = toValueType(input.scalar_type());
    rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
    rwork_data = rwork.data_ptr<value_t>();
  }
  // call magmaEig once (lwork = -1) to get the optimal size of work_data;
  // all matrices in the batch share the same dimensions, so one query suffices
  scalar_t work_query;
  magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
      lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
  magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
  Tensor work = at::empty({lwork}, input.dtype());
  auto work_data = work.data_ptr<scalar_t>();
  for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
    scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
    scalar_t* values_working_ptr = &values_data[i * values_stride];
    scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
    int* info_working_ptr = &infos_data[i];
    magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
        lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
  }
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
  // This function calculates the non-symmetric eigendecomposition in-place
  // tensors should be in batched column major memory format
  // the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
  // apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
  // MAGMA doesn't have a GPU interface for the eigendecomposition, which forces us to transfer 'input' to CPU
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
  Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
  input_working_copy.transpose_(-2, -1);  // make input_working_copy have a Fortran-contiguous memory layout
  input_working_copy.copy_(input);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
    apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
  });
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the singular value decomposition self = U * diag(S) * VT for a
// batch of matrices via MAGMA's gesdd driver (hybrid CPU-GPU; the caller
// provides pinned, column-major CPU tensors).
// jobchar: 'A' = all singular vectors, 'S' = thin, 'N' = singular values only.
// Per-matrix MAGMA status codes are written into 'infos'; computation stops
// at the first failing matrix.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
                      char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto U_data = U.data_ptr<scalar_t>();
  auto S_data = S.data_ptr<value_t>();
  auto VT_data = VT.data_ptr<scalar_t>();
  auto self_stride = matrixStride(self);
  auto U_stride = matrixStride(U);
  auto S_stride = S.size(-1);
  auto VT_stride = matrixStride(VT);
  auto batchsize = batchCount(self);
  magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto lda = std::max<magma_int_t>(1, m);
  auto ldvt = std::max<magma_int_t>(1, n);
  auto mn = std::min(m, n);
  c10::Storage storage_rwork;
  value_t* rwork = nullptr;
  // gesdd always needs an integer workspace of size 8 * min(m, n).
  magma_int_t* iwork;
  ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
    // Complex gesdd additionally needs a real workspace; size depends on jobz.
    auto lrwork = computeLRWorkDim(jobchar, m, n);
    storage_rwork = pin_memory<value_t>(lrwork);
    rwork = static_cast<value_t*>(storage_rwork.data());
  }
  magma_int_t info = 0;
  // Run once, first to get the optimum work size.
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  magma_int_t lwork = -1;
  scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
  magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
  // Clamp to at least 1 in case the query returned a degenerate size, mirroring
  // apply_magma_eigh/apply_linalg_eig in this file.
  lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
  scalar_t* work;
  ALLOCATE_ARRAY(work, scalar_t, lwork);
  for (int64_t i = 0; i < batchsize; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_stride];
    value_t* S_working_ptr = &S_data[i * S_stride];
    scalar_t* U_working_ptr = &U_data[i * U_stride];
    scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
    // Compute S, U (optionally), VT (optionally)
    magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
        S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
    infos[i] = info;
    // Errors abort the rest of the batch; further results would be discarded anyway.
    if (info != 0) {
      return;
    }
  }
#endif
}
// MAGMA-backed SVD helper: stages the CUDA input through pinned, column-major
// CPU memory (as apply_svd requires), then moves U, S, V back to the original
// device. Returns (U, S, V) — note V, not V^T.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
  std::vector<int64_t> infos(batchCount(self), 0);
  int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = std::min(m, n);
  char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
  // The input matrix, U, S and VT have to reside in pinned memory.
  // Additionally, the input and U have to be in column major format.
  // _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
  // For the input matrix, these requirements are taken care of below.
  // Specify strides
  auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
  self_col_major_strides[self.dim() - 2] = 1;
  self_col_major_strides[self.dim() - 1] = m;
  // Create strided tensor in pinned memory
  auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
      at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
    apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
  });
  // Raise on any nonzero MAGMA info code.
  if (self.dim() > 2) {
    batchCheckErrors(infos, "svd_cuda");
  } else {
    singleCheckErrors(infos[0], "svd_cuda");
  }
  // Move results back to the input's device.
  U_working_copy = same_stride_to(U_working_copy, self.options());
  S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
  VT_working_copy = same_stride_to(VT_working_copy, self.options());
  if (!compute_uv) {
    VT_working_copy.zero_();
    U_working_copy.zero_();
  }
  if (some) {
    VT_working_copy = VT_working_copy.narrow(-2, 0, k);
  }
  // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
  // Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
  VT_working_copy = VT_working_copy.conj();
  VT_working_copy.transpose_(-2, -1);
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// SVD entry point: prefer the cuSOLVER implementation when available,
// otherwise fall back to the MAGMA-based legacy path.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
  return _svd_helper_cuda_lib(self, some, compute_uv);
#else
  return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B given the LU factorization of A ('lu' with 'pivots'),
// overwriting 'b' with the solution. Single matrices use magmaLuSolve;
// batched inputs go through the batched MAGMA interface in chunks.
// 'info' receives the MAGMA status code.
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto b_data = b.data_ptr<scalar_t>();
  auto lu_data = lu.data_ptr<scalar_t>();
  auto n = lu.size(-2);
  auto nrhs = b.size(-1);
  int info_tmp = 0;
  if (b.dim() == 2) {
    // Single matrix: the non-batched MAGMA routine wants pivots on the CPU.
    Tensor pivots_tmp = pivots.cpu();
    magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
    info = info_tmp;
  } else {
    auto pivots_data = pivots.data_ptr<magma_int_t>();
    auto b_stride = matrixStride(b);
    auto lu_stride = matrixStride(lu);
    auto pivots_stride = pivots.size(-1);
    magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
    // Build the per-matrix pointer arrays the batched interface expects.
    magma_int_t** pivots_array;
    scalar_t** lu_array;
    scalar_t** b_array;
    ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
    for (int64_t i = 0; i < batch_size; i++) {
      pivots_array[i] = &pivots_data[i * pivots_stride];
      b_array[i] = &b_data[i * b_stride];
      lu_array[i] = &lu_data[i * lu_stride];
    }
    MAGMAQueue magma_queue(b.get_device());
    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** lu_array_cur = &lu_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
      magmaLuSolveBatched<scalar_t>(
          n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
          info_tmp, batch_limit, magma_queue);
      if (info_tmp != 0) {
        break;
      }
    }
    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0 && info_tmp == 0) {
      magmaLuSolveBatched<scalar_t>(
          n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
          info_tmp, batch_size % batch_limit, magma_queue);
    }
    info = info_tmp;
  }
#endif
}
// Solves LU_data X = self using the given LU factorization and pivots.
// Inputs are cloned into column-major layout so MAGMA can work in place;
// the solution is returned in the cloned buffer.
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
  int64_t info = 0;
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
  auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
  // Empty input: nothing to solve, return zeros of the right-hand side's shape.
  if (self.numel() == 0 || LU_data.numel() == 0) {
    return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  }
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
    apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
  });
  TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
  return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Least-squares solve of a X = b via MAGMA's QR-based gels, overwriting 'b'.
// 'rank', 'singular_values', 'cond' and 'driver_name' are part of the stub
// signature but are not used by this MAGMA path.
Tensor& _lstsq_helper_cuda(
    Tensor& b, Tensor& rank, Tensor& singular_values, Tensor& infos, const Tensor& a, double cond, std::string driver_name) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "torch.linalg.lstsq_cuda", [&] {
    auto trans = MagmaNoTrans;
    auto m = magma_int_cast(a.size(-2), "m");
    auto n = magma_int_cast(a.size(-1), "n");
    auto nrhs = magma_int_cast(b.size(-1), "nrhs");
    auto ldda = std::max<magma_int_t>(1, m);
    auto lddb = std::max<magma_int_t>(1, std::max(m, n));
    // Host workspace size per MAGMA's gels requirement, based on the optimal
    // QR blocking factor for (m, n).
    auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
    auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
    Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
    auto* hwork_ptr = hwork.data_ptr<scalar_t>();
    // MAGMA requires infos tensor to live on CPU
    infos = infos.to(at::kCPU);
    auto infos_data = infos.data_ptr<magma_int_t>();
    batch_iterator_with_broadcasting<scalar_t>(a, b,
        [&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
            int64_t a_linear_batch_idx) {
          magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
          magmaGels<scalar_t>(trans, m, n, nrhs,
              a_working_ptr, ldda, b_working_ptr, lddb,
              hwork_ptr, lwork, infos_working_ptr);
        }
    );
  });
  return b;
#endif
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
1b092edbb3b3334811411af50ca64f51cf1be0b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"{
// 7-point finite-difference Laplacian of the shared-memory field 'shar' at
// cell (x+ox+c, y+oy+c, z+oz+c), scaled by ddm2.
// 'c' is the halo offset of the current stencil stage (1, 2 or 3), and
// (ox, oy, oz) selects the 8x8x8 sub-phase within the 14^3 shared block.
__device__ float lap(float shar[14][14][14], float ddm2, int x, int y, int z, int ox, int oy, int oz, int c){
    int i = x + ox + c;
    int j = y + oy + c;
    int k = z + oz + c;
    return ddm2 *
        (shar[i - 1][j][k] + shar[i + 1][j][k]
         + shar[i][j - 1][k] + shar[i][j + 1][k]
         + shar[i][j][k - 1] + shar[i][j][k + 1]
         - 6.0f * shar[i][j][k]);
}
// One time step of the PFC (phase-field crystal) update on an 8x8x8 output
// tile per block. Each block stages a 14x14x14 halo region (tile + 3-cell
// border) in shared memory, then computes nested 7-point Laplacians:
// cache_lap (valid on 12^3), cache_lap2 (10^3), cache_lap3 (8^3).
// Launch assumption: blockDim = (8,8,8); grid tiles the volume interior.
// ddm2 is the Laplacian scale factor (presumably 1/dx^2 — TODO confirm).
__global__ void kernel_timestep(float* in, float* out, int sx, int sy, int sz, float ddm2){
    // ddm2 = 1.0f;
    float dt = 0.05f;   // time-step size
    float r = -0.25f;   // PFC model parameter
    __shared__ float cache[14][14][14];
    __shared__ float cache_lap[14][14][14];  // 12x12x12 valid region
    __shared__ float cache_lap2[14][14][14]; // 10x10x10 valid region
    __shared__ float cache_lap3[14][14][14]; // 8x8x8 valid region
    int sq14 = 14 * 14;
    int x = threadIdx.x;
    int y = threadIdx.y;
    int z = threadIdx.z;
    int bx = blockIdx.x * 8;
    int by = blockIdx.y * 8;
    int bz = blockIdx.z * 8;
    // Cooperative load: remap the 512 threads so that the first 196 (14*14)
    // each copy one x-row of the 14^3 halo block from global memory.
    int load_idx = (z * 8 + y) * 8 + x;
    int load_y = load_idx / 14;
    int load_z = load_idx % 14;
    int base_addr = ((bz + load_z) * sy + by + load_y) * sx + bx;
    if(load_idx < sq14){
        for(int load_x = 0; load_x < 14; load_x++){
            cache[load_x][load_y][load_z] = in[base_addr + load_x];
        }
    }
    __syncthreads();
    // cache is filled now
    // ********************************************************************************
    // compute lap: the 12^3 valid region is covered by eight 8x8x8 phases,
    // each phase handled by a (possibly partial) pass of the 8x8x8 thread block
    // phase 0,0,0
    int ox = 0; int oy = 0; int oz = 0;
    cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // phase x,0,0
    ox = 8; oy = 0; oz = 0;
    if(x < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // phase 0,y,0
    ox = 0; oy = 8; oz = 0;
    if(y < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // phase 0,0,z
    ox = 0; oy = 0; oz = 8;
    if(z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // phase x,y,0
    ox = 8; oy = 8; oz = 0;
    if(x < 4 && y < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // phase x,0,z
    ox = 8; oy = 0; oz = 8;
    if(x < 4 && z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // phase 0,y,z
    ox = 0; oy = 8; oz = 8;
    if(y < 4 && z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // phase x,y,z
    ox = 8; oy = 8; oz = 8;
    if(x < 4 && y < 4 && z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
    __syncthreads();
    // ********************************************************************************
    // ********************************************************************************
    // compute lap2 = lap(lap), valid on the inner 10^3 region (halo offset 2)
    // phase 0,0,0
    ox = 0; oy = 0; oz = 0;
    cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // phase x,0,0
    ox = 8; oy = 0; oz = 0;
    if(x < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // phase 0,y,0
    ox = 0; oy = 8; oz = 0;
    if(y < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // phase 0,0,z
    ox = 0; oy = 0; oz = 8;
    if(z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // phase x,y,0
    ox = 8; oy = 8; oz = 0;
    if(x < 2 && y < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // phase x,0,z
    ox = 8; oy = 0; oz = 8;
    if(x < 2 && z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // phase 0,y,z
    ox = 0; oy = 8; oz = 8;
    if(y < 2 && z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // phase x,y,z
    ox = 8; oy = 8; oz = 8;
    if(x < 2 && y < 2 && z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
    __syncthreads();
    // ********************************************************************************
    // ********************************************************************************
    // compute lap3 = lap(lap2), valid exactly on the 8^3 output tile (offset 3)
    cache_lap3[x + 3][y + 3][z + 3] = lap(cache_lap2, ddm2, x, y, z, 0, 0, 0, 3);
    __syncthreads();
    // ********************************************************************************
    // Laplacian of psi^3 at the output cell, computed directly from the
    // cached field values (cubing each stencil neighbor).
    float c = cache[x + 3][y + 3][z + 3];
    float xm = cache[x + 2][y + 3][z + 3];
    float xp = cache[x + 4][y + 3][z + 3];
    float ym = cache[x + 3][y + 2][z + 3];
    float yp = cache[x + 3][y + 4][z + 3];
    float zm = cache[x + 3][y + 3][z + 2];
    float zp = cache[x + 3][y + 3][z + 4];
    float lapPsi3 = ddm2 *
        (xm * xm * xm + xp * xp * xp
         + ym * ym * ym + yp * yp * yp
         + zm * zm * zm + zp * zp * zp
         - 6.0f * c * c * c);
    // ********************************************************************************
    // PFC (fully explicit variant, kept for reference)
    // out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] =
    // cache[x + 3][y + 3][z + 3]
    // + dt * (lapPsi3
    // + (1.0f + r) * cache_lap[x + 3][y + 3][z + 3]
    // + 2.0f * cache_lap2[x + 3][y + 3][z + 3]
    // + cache_lap3[x + 3][y + 3][z + 3]);
    // ********************************************************************************
    // ********************************************************************************
    // PFC semi-implicit update: the diagonal (center-coefficient) part of the
    // linear operator is treated implicitly, hence the 1/(1 - dt * ...) factor
    // and the matching center-term corrections added to each Laplacian stage.
    out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] =
        1 / (1.0f - dt * (- 6.0f * (r + 1.0f) * ddm2 + 84.0f * ddm2 * ddm2 - 324.0f * ddm2 * ddm2 * ddm2))
        * (c + dt * (lapPsi3
        + (1.0f + r) * (cache_lap[x + 3][y + 3][z + 3] + 6.0f * c * ddm2)
        + 2.0f * (cache_lap2[x + 3][y + 3][z + 3] - 42.0f * c * ddm2 * ddm2)
        + (cache_lap3[x + 3][y + 3][z + 3] + 324.0f * c * ddm2 * ddm2 * ddm2)));
    // ********************************************************************************
    // ********************************************************************************
    // LAPLACE (debug variant)
    // out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] = 0.16666666666f * (cache_lap[x + 3][y + 3][z + 3] + 6.0f * cache[x + 3][y + 3][z + 3]);
    // ********************************************************************************
    // ********************************************************************************
    // ID (debug variant)
    // out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] = cache_lap[x + 3][y + 3][z + 3];
    // ********************************************************************************
    // ********************************************************************************
    // ID (debug variant)
    // out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] = in[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3];
    // ********************************************************************************
}
// Linear index into a row-major (sx, sy, sz) volume: x varies fastest, z slowest.
__device__ int ifun(int sx, int sy, int sz, int x, int y, int z){ return x + sx * (y + sy * z); }
#define I(x,y,z) ifun(sx, sy, sz, x, y, z)
// Floored modulo of the truncated operands: (int)a mod (int)b with the
// quotient rounded toward negative infinity, so the result carries the sign
// of b. Callers pass integer-valued arguments (see wrap below).
__device__ int mod(float a, float b){
    float q = floorf(a / b);
    return ((int)a) - ((int)b) * q;
}
// Wraps index a periodically into [3, b + 3): modulo the interior extent b,
// shifted by the 3-cell ghost-border offset.
__device__ int wrap(int a, int b){
    int folded = mod(a - 3, b);
    return folded + 3;
}
// Fills the 3-cell ghost layers on the x and y faces of the volume with
// periodically wrapped interior values ("noz": the z faces are not handled
// here — the z-face code below is commented out; presumably z ghosts are
// exchanged via kernel_ghost_copy/_inv instead — TODO confirm).
// Launch assumption: a single block of 16x16 threads sweeping the faces.
__global__ void kernel_pbc_noz(float* ar, int sx, int sy, int sz){
    // interior extents (total size minus the 2 * 3-cell ghost border)
    int sxp = sx - 6;
    int syp = sy - 6;
    int szp = sz - 6;
    int a = threadIdx.x;
    int b = threadIdx.y;
    // threadblock: 16x16
    int x_mul_max = sx / 16;
    int y_mul_max = sy / 16;
    int z_mul_max = sz / 16;
    // x - y faces (z ghosts) — intentionally disabled in this kernel
    // for(int x_mul = 0; x_mul <= x_mul_max; x_mul++){
    // for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
    // int xx = 16 * x_mul + a;
    // int yy = 16 * y_mul + b;
    // if(xx < sx && yy < sy){
    // int xc = wrap(xx, sxp);
    // int yc = wrap(yy, syp);
    // ar[I(xx, yy, sz - 3)] = ar[I(xc, yc, 3)];
    // ar[I(xx, yy, sz - 2)] = ar[I(xc, yc, 4)];
    // ar[I(xx, yy, sz - 1)] = ar[I(xc, yc, 5)];
    // ar[I(xx, yy, 2)] = ar[I(xc, yc, sz - 4)];
    // ar[I(xx, yy, 1)] = ar[I(xc, yc, sz - 5)];
    // ar[I(xx, yy, 0)] = ar[I(xc, yc, sz - 6)];
    // }
    // }
    // }
    // x - z faces: copy wrapped interior slices into the y ghost layers
    for(int x_mul = 0; x_mul <= x_mul_max; x_mul++){
        for(int z_mul = 0; z_mul <= z_mul_max; z_mul++){
            int xx = 16 * x_mul + a;
            int zz = 16 * z_mul + b;
            if(xx < sx && zz < sz){
                int xc = wrap(xx, sxp);
                int zc = wrap(zz, szp);
                ar[I(xx, sy - 3, zz)] = ar[I(xc, 3, zc)];
                ar[I(xx, sy - 2, zz)] = ar[I(xc, 4, zc)];
                ar[I(xx, sy - 1, zz)] = ar[I(xc, 5, zc)];
                ar[I(xx, 2, zz)] = ar[I(xc, sy - 4, zc)];
                ar[I(xx, 1, zz)] = ar[I(xc, sy - 5, zc)];
                ar[I(xx, 0, zz)] = ar[I(xc, sy - 6, zc)];
            }
        }
    }
    // y - z faces: copy wrapped interior slices into the x ghost layers
    for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
        for(int z_mul = 0; z_mul <= z_mul_max; z_mul++){
            int yy = 16 * y_mul + a;
            int zz = 16 * z_mul + b;
            if(yy < sy && zz < sz){
                int yc = wrap(yy, syp);
                int zc = wrap(zz, szp);
                ar[I(sx - 3, yy, zz)] = ar[I(3, yc, zc)];
                ar[I(sx - 2, yy, zz)] = ar[I(4, yc, zc)];
                ar[I(sx - 1, yy, zz)] = ar[I(5, yc, zc)];
                ar[I(2, yy, zz)] = ar[I(sx - 4, yc, zc)];
                ar[I(1, yy, zz)] = ar[I(sx - 5, yc, zc)];
                ar[I(0, yy, zz)] = ar[I(sx - 6, yc, zc)];
            }
        }
    }
}
// Extracts the three innermost interior z-slices at each z-end of 'ar' into
// the linear buffers out_left (slices z = 3..5) and out_right
// (slices z = sz-6..sz-4) — presumably for a halo exchange with neighboring
// subdomains; the inverse operation is kernel_ghost_copy_inv below.
// Launch assumption: one block of 256 threads iterating over y.
__global__ void kernel_ghost_copy(float* ar, float* out_left, float* out_right, int sx, int sy, int sz){
    int y_mul_max = sy / 256;
    for(int z = 0; z < 3; z++){
        for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
            int y = y_mul * 256 + threadIdx.x;
            if(y < sy){
                // destination offset in the compact 3-slice buffers
                int base_addr_lin = (z * sy + y) * sx;
                int base_addr_left = ((z + 3) * sy + y) * sx;
                int base_addr_right = ((sz - 6 + z) * sy + y) * sx;
                for(int x = 0; x < sx; x++){
                    out_left[base_addr_lin + x] = ar[base_addr_left + x];
                    out_right[base_addr_lin + x] = ar[base_addr_right + x];
                }
            }
        }
    }
}
// Inverse of kernel_ghost_copy: writes the received 3-slice buffers into the
// ghost layers of 'ar' (out_left into z = 0..2, out_right into z = sz-3..sz-1).
// Launch assumption: one block of 256 threads iterating over y.
__global__ void kernel_ghost_copy_inv(float* ar, float* out_left, float* out_right, int sx, int sy, int sz){
    int y_mul_max = sy / 256;
    for(int z = 0; z < 3; z++){
        for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
            int y = y_mul * 256 + threadIdx.x;
            if(y < sy){
                // source offset in the compact 3-slice buffers
                int base_addr_lin = (z * sy + y) * sx;
                int base_addr_left = ((z + 0) * sy + y) * sx;
                int base_addr_right = ((sz - 3 + z) * sy + y) * sx;
                for(int x = 0; x < sx; x++){
                    ar[base_addr_left + x] = out_left[base_addr_lin + x];
                    ar[base_addr_right + x] = out_right[base_addr_lin + x];
                }
            }
        }
    }
}
// Seeds a unit impulse at cell (3, 3, 3) — the first non-ghost interior cell.
// Intended to run with a single thread.
__global__ void kernel_source(float* ar, int sx, int sy, int sz){
    const int idx = (3 * sy + 3) * sx + 3;
    ar[idx] = 1.0f;
}
}
| 1b092edbb3b3334811411af50ca64f51cf1be0b7.cu | extern "C"{
// 7-point finite-difference Laplacian of the shared-memory field 'shar' at
// cell (x+ox+c, y+oy+c, z+oz+c), scaled by ddm2. 'c' is the halo offset of
// the current stencil stage; (ox, oy, oz) selects the sub-phase offset.
__device__ float lap(float shar[14][14][14], float ddm2, int x, int y, int z, int ox, int oy, int oz, int c){
    int m = c - 1;  // index offset of the "minus" neighbor
    int p = c + 1;  // index offset of the "plus" neighbor
    return ddm2 *
        (shar[x + ox + m][y + oy + c][z + oz + c] + shar[x + ox + p][y + oy + c][z + oz + c]
         + shar[x + ox + c][y + oy + m][z + oz + c] + shar[x + ox + c][y + oy + p][z + oz + c]
         + shar[x + ox + c][y + oy + c][z + oz + m] + shar[x + ox + c][y + oy + c][z + oz + p]
         - 6.0f * shar[x + ox + c][y + oy + c][z + oz + c]);
}
__global__ void kernel_timestep(float* in, float* out, int sx, int sy, int sz, float ddm2){
// ddm2 = 1.0f;
float dt = 0.05f;
float r = -0.25f;
__shared__ float cache[14][14][14];
__shared__ float cache_lap[14][14][14]; // 12x12x12
__shared__ float cache_lap2[14][14][14]; // 10x10x10
__shared__ float cache_lap3[14][14][14]; // 8x8x8
int sq14 = 14 * 14;
int x = threadIdx.x;
int y = threadIdx.y;
int z = threadIdx.z;
int bx = blockIdx.x * 8;
int by = blockIdx.y * 8;
int bz = blockIdx.z * 8;
int load_idx = (z * 8 + y) * 8 + x;
int load_y = load_idx / 14;
int load_z = load_idx % 14;
int base_addr = ((bz + load_z) * sy + by + load_y) * sx + bx;
if(load_idx < sq14){
for(int load_x = 0; load_x < 14; load_x++){
cache[load_x][load_y][load_z] = in[base_addr + load_x];
}
}
__syncthreads();
// cache is filled now
// ********************************************************************************
// compute lap
// phase 0,0,0
int ox = 0; int oy = 0; int oz = 0;
cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// phase x,0,0
ox = 8; oy = 0; oz = 0;
if(x < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// phase 0,y,0
ox = 0; oy = 8; oz = 0;
if(y < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// phase 0,0,z
ox = 0; oy = 0; oz = 8;
if(z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// phase x,y,0
ox = 8; oy = 8; oz = 0;
if(x < 4 && y < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// phase x,0,z
ox = 8; oy = 0; oz = 8;
if(x < 4 && z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// phase 0,y,z
ox = 0; oy = 8; oz = 8;
if(y < 4 && z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// phase x,y,z
ox = 8; oy = 8; oz = 8;
if(x < 4 && y < 4 && z < 4) cache_lap[x + ox + 1][y + oy + 1][z + oz + 1] = lap(cache, ddm2, x, y, z, ox, oy, oz, 1);
__syncthreads();
// ********************************************************************************
// ********************************************************************************
// compute lap2
// phase 0,0,0
ox = 0; oy = 0; oz = 0;
cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// phase x,0,0
ox = 8; oy = 0; oz = 0;
if(x < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// phase 0,y,0
ox = 0; oy = 8; oz = 0;
if(y < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// phase 0,0,z
ox = 0; oy = 0; oz = 8;
if(z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// phase x,y,0
ox = 8; oy = 8; oz = 0;
if(x < 2 && y < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// phase x,0,z
ox = 8; oy = 0; oz = 8;
if(x < 2 && z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// phase 0,y,z
ox = 0; oy = 8; oz = 8;
if(y < 2 && z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// phase x,y,z
ox = 8; oy = 8; oz = 8;
if(x < 2 && y < 2 && z < 2) cache_lap2[x + ox + 2][y + oy + 2][z + oz + 2] = lap(cache_lap, ddm2, x, y, z, ox, oy, oz, 2);
__syncthreads();
// ********************************************************************************
// ********************************************************************************
// compute lap3
cache_lap3[x + 3][y + 3][z + 3] = lap(cache_lap2, ddm2, x, y, z, 0, 0, 0, 3);
__syncthreads();
// ********************************************************************************
float c = cache[x + 3][y + 3][z + 3];
float xm = cache[x + 2][y + 3][z + 3];
float xp = cache[x + 4][y + 3][z + 3];
float ym = cache[x + 3][y + 2][z + 3];
float yp = cache[x + 3][y + 4][z + 3];
float zm = cache[x + 3][y + 3][z + 2];
float zp = cache[x + 3][y + 3][z + 4];
float lapPsi3 = ddm2 *
(xm * xm * xm + xp * xp * xp
+ ym * ym * ym + yp * yp * yp
+ zm * zm * zm + zp * zp * zp
- 6.0f * c * c * c);
// ********************************************************************************
// PFC
// out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] =
// cache[x + 3][y + 3][z + 3]
// + dt * (lapPsi3
// + (1.0f + r) * cache_lap[x + 3][y + 3][z + 3]
// + 2.0f * cache_lap2[x + 3][y + 3][z + 3]
// + cache_lap3[x + 3][y + 3][z + 3]);
// ********************************************************************************
// ********************************************************************************
// PFC semi-implicit
out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] =
1 / (1.0f - dt * (- 6.0f * (r + 1.0f) * ddm2 + 84.0f * ddm2 * ddm2 - 324.0f * ddm2 * ddm2 * ddm2))
* (c + dt * (lapPsi3
+ (1.0f + r) * (cache_lap[x + 3][y + 3][z + 3] + 6.0f * c * ddm2)
+ 2.0f * (cache_lap2[x + 3][y + 3][z + 3] - 42.0f * c * ddm2 * ddm2)
+ (cache_lap3[x + 3][y + 3][z + 3] + 324.0f * c * ddm2 * ddm2 * ddm2)));
// ********************************************************************************
// ********************************************************************************
// LAPLACE
// out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] = 0.16666666666f * (cache_lap[x + 3][y + 3][z + 3] + 6.0f * cache[x + 3][y + 3][z + 3]);
// ********************************************************************************
// ********************************************************************************
// ID
// out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] = cache_lap[x + 3][y + 3][z + 3];
// ********************************************************************************
// ********************************************************************************
// ID
// out[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3] = in[((bz + z + 3) * sy + by + y + 3) * sx + bx + x + 3];
// ********************************************************************************
}
__device__ int ifun(int sx, int sy, int sz, int x, int y, int z){ return (z * sy + y) * sx + x; }
#define I(x,y,z) ifun(sx, sy, sz, x, y, z)
__device__ int mod(float a, float b){ return ((int)a) - ((int)b) * floor(a / b); }
__device__ int wrap(int a, int b){ return mod(a - 3, b) + 3; }
__global__ void kernel_pbc_noz(float* ar, int sx, int sy, int sz){
int sxp = sx - 6;
int syp = sy - 6;
int szp = sz - 6;
int a = threadIdx.x;
int b = threadIdx.y;
// threadblock: 16x16
int x_mul_max = sx / 16;
int y_mul_max = sy / 16;
int z_mul_max = sz / 16;
// x - y
// for(int x_mul = 0; x_mul <= x_mul_max; x_mul++){
// for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
// int xx = 16 * x_mul + a;
// int yy = 16 * y_mul + b;
// if(xx < sx && yy < sy){
// int xc = wrap(xx, sxp);
// int yc = wrap(yy, syp);
// ar[I(xx, yy, sz - 3)] = ar[I(xc, yc, 3)];
// ar[I(xx, yy, sz - 2)] = ar[I(xc, yc, 4)];
// ar[I(xx, yy, sz - 1)] = ar[I(xc, yc, 5)];
// ar[I(xx, yy, 2)] = ar[I(xc, yc, sz - 4)];
// ar[I(xx, yy, 1)] = ar[I(xc, yc, sz - 5)];
// ar[I(xx, yy, 0)] = ar[I(xc, yc, sz - 6)];
// }
// }
// }
// x - z
for(int x_mul = 0; x_mul <= x_mul_max; x_mul++){
for(int z_mul = 0; z_mul <= z_mul_max; z_mul++){
int xx = 16 * x_mul + a;
int zz = 16 * z_mul + b;
if(xx < sx && zz < sz){
int xc = wrap(xx, sxp);
int zc = wrap(zz, szp);
ar[I(xx, sy - 3, zz)] = ar[I(xc, 3, zc)];
ar[I(xx, sy - 2, zz)] = ar[I(xc, 4, zc)];
ar[I(xx, sy - 1, zz)] = ar[I(xc, 5, zc)];
ar[I(xx, 2, zz)] = ar[I(xc, sy - 4, zc)];
ar[I(xx, 1, zz)] = ar[I(xc, sy - 5, zc)];
ar[I(xx, 0, zz)] = ar[I(xc, sy - 6, zc)];
}
}
}
// y - z
for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
for(int z_mul = 0; z_mul <= z_mul_max; z_mul++){
int yy = 16 * y_mul + a;
int zz = 16 * z_mul + b;
if(yy < sy && zz < sz){
int yc = wrap(yy, syp);
int zc = wrap(zz, szp);
ar[I(sx - 3, yy, zz)] = ar[I(3, yc, zc)];
ar[I(sx - 2, yy, zz)] = ar[I(4, yc, zc)];
ar[I(sx - 1, yy, zz)] = ar[I(5, yc, zc)];
ar[I(2, yy, zz)] = ar[I(sx - 4, yc, zc)];
ar[I(1, yy, zz)] = ar[I(sx - 5, yc, zc)];
ar[I(0, yy, zz)] = ar[I(sx - 6, yc, zc)];
}
}
}
}
__global__ void kernel_ghost_copy(float* ar, float* out_left, float* out_right, int sx, int sy, int sz){
int y_mul_max = sy / 256;
for(int z = 0; z < 3; z++){
for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
int y = y_mul * 256 + threadIdx.x;
if(y < sy){
int base_addr_lin = (z * sy + y) * sx;
int base_addr_left = ((z + 3) * sy + y) * sx;
int base_addr_right = ((sz - 6 + z) * sy + y) * sx;
for(int x = 0; x < sx; x++){
out_left[base_addr_lin + x] = ar[base_addr_left + x];
out_right[base_addr_lin + x] = ar[base_addr_right + x];
}
}
}
}
}
__global__ void kernel_ghost_copy_inv(float* ar, float* out_left, float* out_right, int sx, int sy, int sz){
int y_mul_max = sy / 256;
for(int z = 0; z < 3; z++){
for(int y_mul = 0; y_mul <= y_mul_max; y_mul++){
int y = y_mul * 256 + threadIdx.x;
if(y < sy){
int base_addr_lin = (z * sy + y) * sx;
int base_addr_left = ((z + 0) * sy + y) * sx;
int base_addr_right = ((sz - 3 + z) * sy + y) * sx;
for(int x = 0; x < sx; x++){
ar[base_addr_left + x] = out_left[base_addr_lin + x];
ar[base_addr_right + x] = out_right[base_addr_lin + x];
}
}
}
}
}
__global__ void kernel_source(float* ar, int sx, int sy, int sz){
ar[((3) * sy + 3) * sx + 3] = 1.0f;
// ar[((3 + 200) * sy + 3) * sx + 3 + 1200] = 1.0f;
}
}
|
8b59a95eee593ce21c297810e2f10426dcc84525.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
class dreference {
public:
dreference(int *memloc) {
this->memloc = memloc;
}
int operator ()() {
return getval();
}
int operator = (int newval) {
//printf("Writing %d at %p\n", newval, memloc);
hipMemcpy(memloc, &newval, sizeof(int), hipMemcpyHostToDevice);
return newval; // can return self-reference to allow cascaded =.
}
int getval() {
int val;
hipMemcpy(&val, memloc, sizeof(int), hipMemcpyDeviceToHost);
return val;
}
private:
int *memloc;
};
class dvector {
public:
dvector(unsigned size);
~dvector();
dreference operator [](unsigned ii);
void print();
private:
int *arr;
int size;
};
dvector::dvector(unsigned size) {
hipMalloc(&arr, size * sizeof(int));
this->size = size;
//printf("arr points to %p\n", arr);
}
dvector::~dvector() {
hipFree(arr);
arr = NULL;
}
dreference dvector::operator [](unsigned ii) {
return dreference(arr + ii);
}
void dvector::print() {
int aval;
for (int ii = 0; ii < size; ++ii) {
hipMemcpy(&aval, arr + ii, sizeof(int), hipMemcpyDeviceToHost);
std::cout << aval << ", ";
}
std::cout << std::endl;
}
std::ostream & operator <<(std::ostream &os, dreference dd) {
return os << dd.getval();
}
int main() {
dvector dv(10);
dv[0] = 1;
dv[1] = 2;
dv[5] = 2;
std::cout << dv[0] << ", " << dv[1] << std::endl;
dv.print();
return 0;
}
| 8b59a95eee593ce21c297810e2f10426dcc84525.cu | #include <stdio.h>
#include <cuda.h>
#include <iostream>
class dreference {
public:
dreference(int *memloc) {
this->memloc = memloc;
}
int operator ()() {
return getval();
}
int operator = (int newval) {
//printf("Writing %d at %p\n", newval, memloc);
cudaMemcpy(memloc, &newval, sizeof(int), cudaMemcpyHostToDevice);
return newval; // can return self-reference to allow cascaded =.
}
int getval() {
int val;
cudaMemcpy(&val, memloc, sizeof(int), cudaMemcpyDeviceToHost);
return val;
}
private:
int *memloc;
};
class dvector {
public:
dvector(unsigned size);
~dvector();
dreference operator [](unsigned ii);
void print();
private:
int *arr;
int size;
};
dvector::dvector(unsigned size) {
cudaMalloc(&arr, size * sizeof(int));
this->size = size;
//printf("arr points to %p\n", arr);
}
dvector::~dvector() {
cudaFree(arr);
arr = NULL;
}
dreference dvector::operator [](unsigned ii) {
return dreference(arr + ii);
}
void dvector::print() {
int aval;
for (int ii = 0; ii < size; ++ii) {
cudaMemcpy(&aval, arr + ii, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << aval << ", ";
}
std::cout << std::endl;
}
std::ostream & operator <<(std::ostream &os, dreference dd) {
return os << dd.getval();
}
int main() {
dvector dv(10);
dv[0] = 1;
dv[1] = 2;
dv[5] = 2;
std::cout << dv[0] << ", " << dv[1] << std::endl;
dv.print();
return 0;
}
|
c8e52e87e9f9aab7246f2fbfea977db718430236.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| c8e52e87e9f9aab7246f2fbfea977db718430236.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
9af1e8f29c8e01dd74aa17954ac3a08cc71b811c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(256) sgemm_nn_192x192
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda8,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[192* 8 * 4 + 32];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[255-tid];
}
| 9af1e8f29c8e01dd74aa17954ac3a08cc71b811c.cu | /*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(256) sgemm_nn_192x192
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda8,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[192* 8 * 4 + 32];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[255-tid];
}
|
58ecc7e31118e6b04ef6d34545640bf1147bc4c6.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file rev.cu
* \brief CUDA unified virtual addressing benchmark example (reverse
* a list).
*/
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "timer.c"
#define MINTIME 2.0 /*!< Minimum running time, in seconds */
#define MINTRIALS 3 /*!< Minimum number of timing trials, in seconds */
#define MAXVAL 1000 /*!< Maximum array value */
#if CUDART_VERSION < 2020
# error "*** Must have a CUDA 2.2 or greater. ***"
#endif
/** Initialize A[i] = i % MAXVAL */
static void initArray (size_t n, int* A);
/** Checks that A[i] == i % MAXVAL */
static void verifyArray (size_t n, int* A);
/* ================================================== */
/**
* Copies data from A_cpu (on the CPU) to A_gpu (on the GPU),
* reverses the data elements on the GPU, and copies the data back.
*/
__global__
void reverseArray (long n, int* A_gpu)
{
long i = blockIdx.x * blockDim.x + threadIdx.x;
const long n_half = n >> 1; /* floor (n / 2) */
if (i < n_half) {
const int i_pair = n - i - 1;
int a = A_gpu[i];
int b = A_gpu[i_pair];
A_gpu[i] = b;
A_gpu[i_pair] = a;
}
}
/* ================================================== */
/** Benchmarks the reversal operation on unpinned memory. */
static
long double
benchmarkReverseWithCopies (size_t n, int* A_cpu)
{
int* A_gpu = NULL;
hipMalloc ((void **)&A_gpu, n * sizeof (int)); assert (A_gpu);
/* Do one test run */
fprintf (stderr, "benchmarkReverseWithCopies: Testing...\n");
const int BLOCKSIZE = 1024;
const int NUMBLOCKS = (((n+1)/2) + BLOCKSIZE - 1) / BLOCKSIZE;
initArray (n, A_cpu);
hipMemcpy (A_gpu, A_cpu, n * sizeof (int), hipMemcpyDefault);
hipLaunchKernelGGL(( reverseArray) , dim3(NUMBLOCKS), dim3(BLOCKSIZE), 0, 0, n, A_gpu);
hipMemcpy (A_cpu, A_gpu, n * sizeof (int), hipMemcpyDefault);
verifyArray (n, A_cpu);
fprintf (stderr, "==> Passed!\n\n");
/* Timing runs */
fprintf (stderr, "benchmarkReverseWithCopies: Timing...\n");
long double t_elapsed = 0;
size_t trials = 0;
stopwatch_init ();
struct stopwatch_t* timer = stopwatch_create ();
stopwatch_start (timer);
while (trials < MINTRIALS || t_elapsed < MINTIME) {
hipMemcpy (A_gpu, A_cpu, n * sizeof (int), hipMemcpyDefault);
hipLaunchKernelGGL(( reverseArray) , dim3(NUMBLOCKS), dim3(BLOCKSIZE), 0, 0, n, A_gpu);
hipMemcpy (A_cpu, A_gpu, n * sizeof (int), hipMemcpyDefault);
hipDeviceSynchronize ();
++trials;
t_elapsed = stopwatch_elapsed (timer);
}
stopwatch_destroy (timer);
fprintf (stderr, "==> %lu trials took %Lg seconds.\n", trials, t_elapsed);
hipFree (A_gpu);
return t_elapsed / trials;
}
/* ================================================== */
/** Benchmarks the reversal operation on pinned memory. */
static
long double
benchmarkReverseWithoutCopies (size_t n, int* A_cpu_pinned)
{
/* Do one test run */
fprintf (stderr, "benchmarkReverseWithoutCopies: Testing...\n");
const int BLOCKSIZE = 1024;
const int NUMBLOCKS = (((n+1)/2) + BLOCKSIZE - 1) / BLOCKSIZE;
initArray (n, A_cpu_pinned);
hipLaunchKernelGGL(( reverseArray) , dim3(NUMBLOCKS), dim3(BLOCKSIZE), 0, 0, n, A_cpu_pinned);
hipDeviceSynchronize ();
verifyArray (n, A_cpu_pinned);
fprintf (stderr, "==> Passed!\n\n");
/* Timing runs */
fprintf (stderr, "benchmarkReverseWithoutCopies: Timing...\n");
long double t_elapsed = 0;
size_t trials = 0;
stopwatch_init ();
struct stopwatch_t* timer = stopwatch_create ();
stopwatch_start (timer);
while (trials < MINTRIALS || t_elapsed < MINTIME) {
hipLaunchKernelGGL(( reverseArray) , dim3(NUMBLOCKS), dim3(BLOCKSIZE), 0, 0, n, A_cpu_pinned);
hipDeviceSynchronize ();
++trials;
t_elapsed = stopwatch_elapsed (timer);
}
stopwatch_destroy (timer);
fprintf (stderr, "==> %lu trials took %Lg seconds.\n", trials, t_elapsed);
return t_elapsed / trials;
}
/* ================================================== */
#define TARGET(i) ((int)((i) % MAXVAL))
static
void
initArray (size_t n, int* A)
{
for (size_t i = 0; i < n; ++i) {
const int target = TARGET (i);
A[i] = target;
}
}
static
void
verifyArray (size_t n, int* A)
{
for (size_t i = 0; i < n; ++i) {
const int target = TARGET (n - i - 1);
if (A[i] != target) {
fprintf (stderr, "*** ERROR: Element A[%lu] == %d != %d! ***\n",
(unsigned long)i, A[i], target);
assert (0);
}
}
}
/* ================================================== */
int
main (int argc, char* argv[])
{
if (argc < 2) {
fprintf (stderr, "usage: %s <n>\n", argv[0]);
return -1;
}
long n_raw = atol (argv[1]);
assert (n_raw > 0);
const size_t n = (size_t)n_raw;
const size_t n_bytes = n * sizeof (int);
fprintf (stderr, "n = %lu (~ %.1f MiB)\n", n, (double)n_bytes/1024/1024);
fprintf (stderr, "Test 1: Using 'malloc' on CPU...\n");
int* A_cpu = (int *)malloc (n_bytes);
assert (A_cpu);
long double t_baseline = benchmarkReverseWithCopies (n, A_cpu);
printf ("==> Reversal with explicit copies: %Lg seconds (%Lg effective GB/s)\n\n",
t_baseline, (long double)2e-9 * n_bytes / t_baseline);
free (A_cpu);
fprintf (stderr, "Test 2: Using pinned hipHostMalloc...\n");
int* A_cpu_pinned = NULL;
hipHostMalloc ((void **)&A_cpu_pinned, n_bytes, hipHostMallocMapped | hipHostMallocPortable);
assert (A_cpu_pinned);
long double t_pinned = benchmarkReverseWithoutCopies (n, A_cpu_pinned);
printf ("==> Reversal without explicit copies: %Lg seconds (%Lg effective GB/s)\n\n",
t_pinned, (long double)2e-9 * n_bytes / t_pinned);
hipHostFree (A_cpu_pinned);
return 0;
}
/* eof */
| 58ecc7e31118e6b04ef6d34545640bf1147bc4c6.cu | /**
* \file rev.cu
* \brief CUDA unified virtual addressing benchmark example (reverse
* a list).
*/
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <cuda.h>
#include "timer.c"
#define MINTIME 2.0 /*!< Minimum running time, in seconds */
#define MINTRIALS 3 /*!< Minimum number of timing trials, in seconds */
#define MAXVAL 1000 /*!< Maximum array value */
#if CUDART_VERSION < 2020
# error "*** Must have a CUDA 2.2 or greater. ***"
#endif
/** Initialize A[i] = i % MAXVAL */
static void initArray (size_t n, int* A);
/** Checks that A[i] == i % MAXVAL */
static void verifyArray (size_t n, int* A);
/* ================================================== */
/**
* Copies data from A_cpu (on the CPU) to A_gpu (on the GPU),
* reverses the data elements on the GPU, and copies the data back.
*/
__global__
void reverseArray (long n, int* A_gpu)
{
long i = blockIdx.x * blockDim.x + threadIdx.x;
const long n_half = n >> 1; /* floor (n / 2) */
if (i < n_half) {
const int i_pair = n - i - 1;
int a = A_gpu[i];
int b = A_gpu[i_pair];
A_gpu[i] = b;
A_gpu[i_pair] = a;
}
}
/* ================================================== */
/** Benchmarks the reversal operation on unpinned memory. */
static
long double
benchmarkReverseWithCopies (size_t n, int* A_cpu)
{
int* A_gpu = NULL;
cudaMalloc ((void **)&A_gpu, n * sizeof (int)); assert (A_gpu);
/* Do one test run */
fprintf (stderr, "benchmarkReverseWithCopies: Testing...\n");
const int BLOCKSIZE = 1024;
const int NUMBLOCKS = (((n+1)/2) + BLOCKSIZE - 1) / BLOCKSIZE;
initArray (n, A_cpu);
cudaMemcpy (A_gpu, A_cpu, n * sizeof (int), cudaMemcpyDefault);
reverseArray <<<NUMBLOCKS, BLOCKSIZE>>> (n, A_gpu);
cudaMemcpy (A_cpu, A_gpu, n * sizeof (int), cudaMemcpyDefault);
verifyArray (n, A_cpu);
fprintf (stderr, "==> Passed!\n\n");
/* Timing runs */
fprintf (stderr, "benchmarkReverseWithCopies: Timing...\n");
long double t_elapsed = 0;
size_t trials = 0;
stopwatch_init ();
struct stopwatch_t* timer = stopwatch_create ();
stopwatch_start (timer);
while (trials < MINTRIALS || t_elapsed < MINTIME) {
cudaMemcpy (A_gpu, A_cpu, n * sizeof (int), cudaMemcpyDefault);
reverseArray <<<NUMBLOCKS, BLOCKSIZE>>> (n, A_gpu);
cudaMemcpy (A_cpu, A_gpu, n * sizeof (int), cudaMemcpyDefault);
cudaDeviceSynchronize ();
++trials;
t_elapsed = stopwatch_elapsed (timer);
}
stopwatch_destroy (timer);
fprintf (stderr, "==> %lu trials took %Lg seconds.\n", trials, t_elapsed);
cudaFree (A_gpu);
return t_elapsed / trials;
}
/* ================================================== */
/** Benchmarks the reversal operation on pinned memory. */
static
long double
benchmarkReverseWithoutCopies (size_t n, int* A_cpu_pinned)
{
/* Do one test run */
fprintf (stderr, "benchmarkReverseWithoutCopies: Testing...\n");
const int BLOCKSIZE = 1024;
const int NUMBLOCKS = (((n+1)/2) + BLOCKSIZE - 1) / BLOCKSIZE;
initArray (n, A_cpu_pinned);
reverseArray <<<NUMBLOCKS, BLOCKSIZE>>> (n, A_cpu_pinned);
cudaDeviceSynchronize ();
verifyArray (n, A_cpu_pinned);
fprintf (stderr, "==> Passed!\n\n");
/* Timing runs */
fprintf (stderr, "benchmarkReverseWithoutCopies: Timing...\n");
long double t_elapsed = 0;
size_t trials = 0;
stopwatch_init ();
struct stopwatch_t* timer = stopwatch_create ();
stopwatch_start (timer);
while (trials < MINTRIALS || t_elapsed < MINTIME) {
reverseArray <<<NUMBLOCKS, BLOCKSIZE>>> (n, A_cpu_pinned);
cudaDeviceSynchronize ();
++trials;
t_elapsed = stopwatch_elapsed (timer);
}
stopwatch_destroy (timer);
fprintf (stderr, "==> %lu trials took %Lg seconds.\n", trials, t_elapsed);
return t_elapsed / trials;
}
/* ================================================== */
#define TARGET(i) ((int)((i) % MAXVAL))
static
void
initArray (size_t n, int* A)
{
for (size_t i = 0; i < n; ++i) {
const int target = TARGET (i);
A[i] = target;
}
}
static
void
verifyArray (size_t n, int* A)
{
for (size_t i = 0; i < n; ++i) {
const int target = TARGET (n - i - 1);
if (A[i] != target) {
fprintf (stderr, "*** ERROR: Element A[%lu] == %d != %d! ***\n",
(unsigned long)i, A[i], target);
assert (0);
}
}
}
/* ================================================== */
int
main (int argc, char* argv[])
{
if (argc < 2) {
fprintf (stderr, "usage: %s <n>\n", argv[0]);
return -1;
}
long n_raw = atol (argv[1]);
assert (n_raw > 0);
const size_t n = (size_t)n_raw;
const size_t n_bytes = n * sizeof (int);
fprintf (stderr, "n = %lu (~ %.1f MiB)\n", n, (double)n_bytes/1024/1024);
fprintf (stderr, "Test 1: Using 'malloc' on CPU...\n");
int* A_cpu = (int *)malloc (n_bytes);
assert (A_cpu);
long double t_baseline = benchmarkReverseWithCopies (n, A_cpu);
printf ("==> Reversal with explicit copies: %Lg seconds (%Lg effective GB/s)\n\n",
t_baseline, (long double)2e-9 * n_bytes / t_baseline);
free (A_cpu);
fprintf (stderr, "Test 2: Using pinned cudaHostAlloc...\n");
int* A_cpu_pinned = NULL;
cudaHostAlloc ((void **)&A_cpu_pinned, n_bytes, cudaHostAllocMapped | cudaHostAllocPortable);
assert (A_cpu_pinned);
long double t_pinned = benchmarkReverseWithoutCopies (n, A_cpu_pinned);
printf ("==> Reversal without explicit copies: %Lg seconds (%Lg effective GB/s)\n\n",
t_pinned, (long double)2e-9 * n_bytes / t_pinned);
cudaFreeHost (A_cpu_pinned);
return 0;
}
/* eof */
|
18c97ac8d9e3094f899702f8b847709001b840c9.hip | // !!! This is a file automatically generated by hipify!!!
//jacobi7.cu
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <jacobi7_cuda_shared.h>
#include <jacobi7.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
void initial_data(float *h_A, float *h_B, const int xyz){
// randomly generaed test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_B[i] = h_A[i];
}
}
int main(int argc, char* *argv){
if(argc != 7) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]);
return 1;
}
// program parameters trans
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int timesteps = atoi(argv[6]);
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
float *h_A, *h_A1;
float *h_B, *h_B1;
float *d_A;
float *d_B;
int devId = 0;
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( hipSetDevice(devId));
// Allocate host buffers
checkCuda(hipHostMalloc((void**)&h_A, xyz_bytes)); // host pinned
checkCuda(hipHostMalloc((void**)&h_B, xyz_bytes));
// for comparison btw CPU and GPU version
checkCuda(hipHostMalloc((void**)&h_A1, xyz_bytes));
checkCuda(hipHostMalloc((void**)&h_B1, xyz_bytes));
// grid data iniatialization
// randomly generaed test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_A1[i] = h_B1[i] = h_B[i] = h_A[i];
}
// A simple comparison of the result
int testIndex = 3 + 3*nx+ 3*nx*ny;
printf("Iniatialized data[%d]=%f\n", testIndex , h_A[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
const float fac = 6.0/(h_A[0] * h_A[0]);
float *tmp;
dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty);
dim3 block(tx, ty);
printf("grid:(%d, %d)\n", grid.x, grid.y);
printf("block:(%d, %d)\n", tx, ty);
float ms, ms1; // elapsed time in milliseconds
printf("Start computing...\n");
/* set the ratio of cache/shared memory
hipFuncCachePreferNone: Default function cache configuration, no preference
hipFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
hipFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
checkCuda(hipDeviceSetCacheConfig(hipFuncCachePreferL1));*/
//const int sharedMemSize = (block.x + 2) * (block.y + 2) * sizeof(float);
// create events and streams
hipEvent_t startEvent, stopEvent, startEvent1, stopEvent1;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventCreate(&startEvent1));
checkCuda( hipEventCreate(&stopEvent1));
// timing start include data transfer and memory allocation
checkCuda( hipEventRecord(startEvent,0) );
// Allocate device buffers
checkCuda(hipMalloc((void**)&d_A, xyz_bytes)); // device
checkCuda(hipMalloc((void**)&d_B, xyz_bytes));
float* input = d_A;
float* output = d_B;
// copy data to device
checkCuda( hipMemcpy(d_A, h_A, xyz_bytes, hipMemcpyHostToDevice));
checkCuda( hipMemcpy(d_B, d_A, xyz_bytes, hipMemcpyDeviceToDevice));
// timing start pure gpu computing
checkCuda( hipEventRecord(startEvent1, 0));
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 1) {
hipLaunchKernelGGL(( jacobi3d_7p_shmem_adam_reg), dim3(grid), dim3(block), 0, 0, input, output, nx, ny, nz, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
// timing end pure gpu computing
checkCuda( hipEventRecord(stopEvent1, 0));
checkCuda( hipEventSynchronize(stopEvent1));
checkCuda( hipEventElapsedTime(&ms1, startEvent1, stopEvent1));
printf("Time of register version (pure GPU) (ms): %f\n", ms1);
double gflop = (xyz * 1e-9) * 7.0 * timesteps;
double gflop_per_sec = gflop * 1e3 / ms1;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms1;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
if(timesteps%2==0)
checkCuda( hipMemcpy(h_A, output, xyz_bytes, hipMemcpyDeviceToHost));
else
checkCuda( hipMemcpy(h_A, input, xyz_bytes, hipMemcpyDeviceToHost));
checkCuda( hipEventRecord(stopEvent, 0));
checkCuda( hipEventSynchronize(stopEvent));
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent));
float *gpuResult = h_A;
printf("Time of shared memory version (ms): %f\n", ms);
printf("(including data transfer and memory allocation in GPU.)\n");
gflop = (xyz * 1e-9) * 7.0 * timesteps;
gflop_per_sec = gflop * 1e3 / ms;
printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms;
printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
// Run the CPU version
//float startTime = rtclock();
float *tmp1;
for(int t = 0; t < timesteps; t += 1) {
jacobi7(nx, ny, nz, h_A1, h_B1, fac);
tmp1 = h_A1;
h_A1 = h_B1;
h_B1 = tmp1;
}
float *cpuResult;
if ((timesteps%2) == 0)
cpuResult = h_B1;
else
cpuResult = h_A1;
/*float endTime = rtclock();
double elapsedTimeC = endTime - startTime;
printf("Elapsed Time:%lf\n", elapsedTimeC);
flops = xyz * 7.0 * timesteps;
gflops = flops / elapsedTimeC / 1e9;
printf("(CPU) %lf GFlop/s\n", gflops);
*/
// compare the results btw CPU and GPU version
double errorNorm, refNorm, diff;
errorNorm = 0.0;
refNorm = 0.0;
i = 0;
for (; i < xyz; ++i){
diff = cpuResult[i] - gpuResult[i];
errorNorm += diff * diff;
refNorm += cpuResult[i] * cpuResult[i];
}
errorNorm = sqrt(errorNorm);
refNorm = sqrt(refNorm);
printf("Error Norm:%lf\n", errorNorm);
printf("Ref Norm:%lf\n", refNorm);
if(abs(refNorm) < 1e-7) {
printf("Correctness, FAILED\n");
}
else if((errorNorm / refNorm) > 1e-2) {
printf("Correctness, FAILED\n");
}
else {
printf("Correctness, PASSED\n");
}
printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
// cleanup
checkCuda( hipEventDestroy(startEvent));
checkCuda( hipEventDestroy(stopEvent));
hipHostFree(h_A);
hipHostFree(h_B);
hipHostFree(h_A1);
hipHostFree(h_B1);
hipFree(d_A);
hipFree(d_B);
return 0;
} | 18c97ac8d9e3094f899702f8b847709001b840c9.cu | //jacobi7.cu
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <jacobi7_cuda_shared.h>
#include <jacobi7.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Convenience wrapper for CUDA runtime API results. In debug builds
// (DEBUG/_DEBUG) a failure prints the error string to stderr and asserts;
// in release builds the check compiles away to a pass-through.
// Returns the unmodified cudaError_t so the call can still be inspected.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    // NOTE(review): assert() presumably comes in transitively via a project
    // header -- confirm <assert.h> is reachable or debug builds won't compile.
    assert(result == cudaSuccess);
  }
#endif
  return result;
}
// Fill both host grids with identical pseudo-random values in [1, 2].
// h_A and h_B receive exactly the same data so CPU and GPU runs start from
// an identical initial state; xyz is the total number of grid points.
void initial_data(float *h_A, float *h_B, const int xyz){
  srand(time(NULL));  // reseed from the wall clock on every call
  for (int idx = 0; idx < xyz; ++idx) {
    const float v = 1 + (float)rand() / (float)RAND_MAX;
    h_A[idx] = v;
    h_B[idx] = v;
  }
}
/**
 * Driver: runs a 7-point 3D Jacobi stencil for <TIME STEPS> iterations on
 * the GPU (jacobi3d_7p_shmem_adam_reg kernel) and on the CPU (jacobi7),
 * times both, and validates the GPU result against the CPU reference by
 * comparing relative L2 norms.
 * Usage: <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>
 */
int main(int argc, char* *argv){
  if(argc != 7) {
    printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]);
    return 1;
  }
  // program parameters trans
  const int nx = atoi(argv[1]);
  const int ny = atoi(argv[2]);
  const int nz = atoi(argv[3]);
  const int tx = atoi(argv[4]);        // thread-block dim x
  const int ty = atoi(argv[5]);        // thread-block dim y
  const int timesteps = atoi(argv[6]);
  const int xyz = nx * ny * nz;
  const int xyz_bytes = xyz * sizeof(float);
  float *h_A, *h_A1;
  float *h_B, *h_B1;
  float *d_A;
  float *d_B;
  int devId = 0;
  cudaDeviceProp prop;
  checkCuda( cudaGetDeviceProperties(&prop, devId));
  printf("Device : %s\n", prop.name);
  checkCuda( cudaSetDevice(devId));
  // Allocate host buffers (pinned, so async-capable and faster to copy)
  checkCuda(cudaMallocHost((void**)&h_A, xyz_bytes)); // host pinned
  checkCuda(cudaMallocHost((void**)&h_B, xyz_bytes));
  // for comparison btw CPU and GPU version
  checkCuda(cudaMallocHost((void**)&h_A1, xyz_bytes));
  checkCuda(cudaMallocHost((void**)&h_B1, xyz_bytes));
  // grid data initialization: randomly generated test data,
  // identical in all four host buffers
  srand(time(NULL));
  int i = 0;
  for(; i < xyz; i++) {
    h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
    h_A1[i] = h_B1[i] = h_B[i] = h_A[i];
  }
  // A simple comparison of the result
  int testIndex = 3 + 3*nx+ 3*nx*ny;
  printf("Iniatialized data[%d]=%f\n", testIndex , h_A[testIndex]);
  printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
  printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
  const float fac = 6.0/(h_A[0] * h_A[0]);
  float *tmp;
  dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty);   // ceil-div so the board is covered
  dim3 block(tx, ty);
  printf("grid:(%d, %d)\n", grid.x, grid.y);
  printf("block:(%d, %d)\n", tx, ty);
  float ms, ms1; // elapsed time in milliseconds
  printf("Start computing...\n");
  /* set the ratio of cache/shared memory
     cudaFuncCachePreferNone: Default function cache configuration, no preference
     cudaFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
     cudaFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
     checkCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));*/
  //const int sharedMemSize = (block.x + 2) * (block.y + 2) * sizeof(float);
  // create events and streams
  cudaEvent_t startEvent, stopEvent, startEvent1, stopEvent1;
  checkCuda( cudaEventCreate(&startEvent) );
  checkCuda( cudaEventCreate(&stopEvent) );
  checkCuda( cudaEventCreate(&startEvent1));
  checkCuda( cudaEventCreate(&stopEvent1));
  // timing start include data transfer and memory allocation
  checkCuda( cudaEventRecord(startEvent,0) );
  // Allocate device buffers
  checkCuda(cudaMalloc((void**)&d_A, xyz_bytes)); // device
  checkCuda(cudaMalloc((void**)&d_B, xyz_bytes));
  float* input = d_A;
  float* output = d_B;
  // copy data to device
  checkCuda( cudaMemcpy(d_A, h_A, xyz_bytes, cudaMemcpyHostToDevice));
  checkCuda( cudaMemcpy(d_B, d_A, xyz_bytes, cudaMemcpyDeviceToDevice));
  // timing start pure gpu computing
  checkCuda( cudaEventRecord(startEvent1, 0));
  // Run the GPU kernel, ping-ponging the two device buffers
  for(int t = 0; t < timesteps; t += 1) {
    jacobi3d_7p_shmem_adam_reg<<<grid, block>>>(input, output, nx, ny, nz, fac);
    // swap input and output
    tmp = input;
    input = output;
    output = tmp;
  }
  // timing end pure gpu computing
  checkCuda( cudaEventRecord(stopEvent1, 0));
  checkCuda( cudaEventSynchronize(stopEvent1));
  checkCuda( cudaEventElapsedTime(&ms1, startEvent1, stopEvent1));
  printf("Time of register version (pure GPU) (ms): %f\n", ms1);
  double gflop = (xyz * 1e-9) * 7.0 * timesteps;   // 7 flops per point update
  double gflop_per_sec = gflop * 1e3 / ms1;
  printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
  // FIX: (xyz >> 20) truncated to 0 for grids below 2^20 points; compute
  // million-updates/s in floating point instead.
  double mupdate_per_sec = ((double)xyz * timesteps) / (ms1 * 1e3);
  printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
  // After the final swap `input` holds the newest data; for an even number
  // of steps that is `output` as seen before the last swap.
  if(timesteps%2==0)
    checkCuda( cudaMemcpy(h_A, output, xyz_bytes, cudaMemcpyDeviceToHost));
  else
    checkCuda( cudaMemcpy(h_A, input, xyz_bytes, cudaMemcpyDeviceToHost));
  checkCuda( cudaEventRecord(stopEvent, 0));
  checkCuda( cudaEventSynchronize(stopEvent));
  checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent));
  float *gpuResult = h_A;
  printf("Time of shared memory version (ms): %f\n", ms);
  printf("(including data transfer and memory allocation in GPU.)\n");
  gflop = (xyz * 1e-9) * 7.0 * timesteps;
  gflop_per_sec = gflop * 1e3 / ms;
  printf("(GPU) %lf GFlop/s\n", gflop_per_sec);
  mupdate_per_sec = ((double)xyz * timesteps) / (ms * 1e3);  // FIX: see above
  printf("(GPU) %lf M updates/s\n", mupdate_per_sec);
  // Run the CPU version
  //float startTime = rtclock();
  float *tmp1;
  for(int t = 0; t < timesteps; t += 1) {
    jacobi7(nx, ny, nz, h_A1, h_B1, fac);
    tmp1 = h_A1;
    h_A1 = h_B1;
    h_B1 = tmp1;
  }
  float *cpuResult;
  if ((timesteps%2) == 0)
    cpuResult = h_B1;
  else
    cpuResult = h_A1;
  /*float endTime = rtclock();
    double elapsedTimeC = endTime - startTime;
    printf("Elapsed Time:%lf\n", elapsedTimeC);
    flops = xyz * 7.0 * timesteps;
    gflops = flops / elapsedTimeC / 1e9;
    printf("(CPU) %lf GFlop/s\n", gflops);
  */
  // compare the results btw CPU and GPU version via relative L2 norm
  double errorNorm, refNorm, diff;
  errorNorm = 0.0;
  refNorm = 0.0;
  i = 0;
  for (; i < xyz; ++i){
    diff = cpuResult[i] - gpuResult[i];
    errorNorm += diff * diff;
    refNorm += cpuResult[i] * cpuResult[i];
  }
  errorNorm = sqrt(errorNorm);
  refNorm = sqrt(refNorm);
  printf("Error Norm:%lf\n", errorNorm);
  printf("Ref Norm:%lf\n", refNorm);
  // FIX: use fabs() -- integer abs() truncated the double norm to an int,
  // so tiny-but-nonzero norms were misclassified.
  if(fabs(refNorm) < 1e-7) {
    printf("Correctness, FAILED\n");
  }
  else if((errorNorm / refNorm) > 1e-2) {
    printf("Correctness, FAILED\n");
  }
  else {
    printf("Correctness, PASSED\n");
  }
  printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]);
  printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]);
  printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]);
  printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]);
  printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]);
  printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]);
  // cleanup -- FIX: also destroy the pure-GPU timing events, previously leaked
  checkCuda( cudaEventDestroy(startEvent));
  checkCuda( cudaEventDestroy(stopEvent));
  checkCuda( cudaEventDestroy(startEvent1));
  checkCuda( cudaEventDestroy(stopEvent1));
  cudaFreeHost(h_A);
  cudaFreeHost(h_B);
  cudaFreeHost(h_A1);
  cudaFreeHost(h_B1);
  cudaFree(d_A);
  cudaFree(d_B);
  return 0;
}
bc_app.hip | // !!! This is a file automatically generated by hipify!!!
// //
// ----------------------------------------------------------------------------
// // Gunrock -- Fast and Efficient GPU Graph Library
// //
// ----------------------------------------------------------------------------
// // This source code is distributed under the terms of LICENSE.TXT
// // in the root directory of this source distribution.
// //
// ----------------------------------------------------------------------------
// /**
// * @file bc_app.cu
// *
// * @brief Betweenness Centrality (BC) application
// */
#include <iostream>
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definations
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// betweenness centrality path includesls
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_test.cuh>
namespace gunrock {
namespace app {
namespace bc {
/**
 * @brief Registers the BC-specific command-line parameters ("src",
 * "src-seed") on top of the generic app / problem / enactor parameters.
 * @param[in,out] parameters Parameter container to extend
 * \return hipError_t of the first failing registration, if any
 */
hipError_t UseParameters(util::Parameters &parameters) {
  hipError_t retval = hipSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));
  // "src" accepts multiple values; each run cycles through them.
  GUARD_CU(parameters.Use<std::string>(
      "src",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      "0",
      "<Vertex-ID|random|largestdegree> The source vertices\n"
      "\tIf random, randomly select non-zero degree vertices;\n"
      "\tIf largestdegree, select vertices with largest degrees",
      __FILE__, __LINE__));
  // Seed for the "random" source-selection mode above.
  GUARD_CU(parameters.Use<int>(
      "src-seed",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      util::PreDefinedValues<int>::InvalidValue,
      "seed to generate random sources", __FILE__, __LINE__));
  return retval;
}
/**
* @brief Run BC tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Where to perform the BC computation
* \return hipError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT,
          typename VertexT = typename GraphT::VertexT>
hipError_t RunTests(util::Parameters &parameters, GraphT &graph,
                    ValueT **reference_bc_values = NULL,
                    ValueT **reference_sigmas = NULL,
                    VertexT **reference_labels = NULL,
                    util::Location target = util::DEVICE) {
  std::cout << "--- RunTests ---" << std::endl;
  hipError_t retval = hipSuccess;
  typedef typename GraphT::SizeT SizeT;
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();
  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  util::Info info("bc", parameters, graph);  // initialize Info structure
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();
  // Allocate host-side array (for both reference and GPU-computed results)
  ValueT *h_bc_values = new ValueT[graph.nodes];
  ValueT *h_sigmas = new ValueT[graph.nodes];
  VertexT *h_labels = new VertexT[graph.nodes];
  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
  // perform the algorithm; sources are cycled through across runs
  VertexT src;
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    auto run_index = run_num % num_srcs;
    src = srcs[run_index];
#if 1
    // src == -1 is a sentinel for "enact from every vertex".
    // NOTE(review): this assumes a signed VertexT -- confirm for unsigned
    // instantiations, where the comparison semantics change.
    if (src == -1) {
      for (src = 0 ; src < graph.nodes ; ++src) {
        GUARD_CU(problem.Reset(src, target));
        GUARD_CU(enactor.Reset(src, target));
        cpu_timer.Start();
        GUARD_CU(enactor.Enact(src));
        cpu_timer.Stop();
        info.CollectSingleRun(cpu_timer.ElapsedMillis());
      }
    } else {
      GUARD_CU(problem.Reset(src, target));
      GUARD_CU(enactor.Reset(src, target));
      cpu_timer.Start();
      GUARD_CU(enactor.Enact(src));
      cpu_timer.Stop();
      info.CollectSingleRun(cpu_timer.ElapsedMillis());
    }
#else
    GUARD_CU(problem.Reset(src, target));
    GUARD_CU(enactor.Reset(src, target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact(src));
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
#endif
    util::PrintMsg(
        "--------------------------\nRun " + std::to_string(run_num) +
            " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
            " ms, src = " + std::to_string(src) + ", #iterations = " +
            std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
        !quiet_mode);
    // Optionally validate after every run (validation == "each").
    if (validation == "each") {
      GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
      // Validation reports its own errors; num_errors is intentionally unused.
      SizeT num_errors = app::bc::Validate_Results(
          parameters, graph, src, h_bc_values, h_sigmas, h_labels,
          reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
          reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
          reference_labels == NULL ? NULL : reference_labels[run_index], true);
    }
  }
  cpu_timer.Start();
  // Copy out results
  GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
  if (validation == "last") {
    auto run_index = (num_runs - 1) % num_srcs;
    SizeT num_errors = app::bc::Validate_Results(
        parameters, graph, src, h_bc_values, h_sigmas, h_labels,
        reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
        reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
        reference_labels == NULL ? NULL : reference_labels[run_index], true);
  }
  // compute running statistics
  info.ComputeTraversalStats(enactor, h_labels);
  // Display_Memory_Usage(problem);
  // #ifdef ENABLE_PERFORMANCE_PROFILING
  //   Display_Performance_Profiling(&enactor);
  // #endif
  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_bc_values;
  h_bc_values = NULL;
  delete[] h_sigmas;
  h_sigmas = NULL;
  delete[] h_labels;
  h_labels = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace bc
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_bc function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the BC values
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_bc(gunrock::util::Parameters &parameters, GraphT &graph,
                  ValueT **bc_values, ValueT **sigmas,
                  typename GraphT::VertexT **labels) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::bc::Problem<GraphT> ProblemT;
  typedef gunrock::app::bc::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
  // Allocate problem and enactor on GPU, and initialize them.
  // NOTE(review): return codes of Init/Reset/Enact/Extract are ignored here,
  // unlike RunTests above -- errors will surface only downstream.
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, target);
  enactor.Init(problem, target);
  int num_runs = parameters.Get<int>("num-runs");
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    int src_num = run_num % num_srcs;
    VertexT src = srcs[src_num];
#if 1
    // src == -1 selects the all-sources mode (assumes signed VertexT).
    if (src == -1) {
      for (src = 0 ; src < graph.nodes ; ++src) {
        problem.Reset(src, target);
        enactor.Reset(src, target);
        cpu_timer.Start();
        enactor.Enact(src);
        cpu_timer.Stop();
        total_time += cpu_timer.ElapsedMillis();
      }
    } else {
      problem.Reset(src, target);
      enactor.Reset(src, target);
      cpu_timer.Start();
      enactor.Enact(src);
      cpu_timer.Stop();
      total_time += cpu_timer.ElapsedMillis();
    }
#else
    problem.Reset(src, target);
    enactor.Reset(src, target);
    cpu_timer.Start();
    enactor.Enact(src);
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();
#endif
    // Per-source output slot: results of run_num land in index src_num.
    problem.Extract(bc_values[src_num], sigmas[src_num], labels[src_num]);
  }
  enactor.Release(target);
  problem.Release(target);
  srcs.clear();
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform BC
* @param[in] sources Sources to begin traverse, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Templated CSR entry point: wraps the raw CSR arrays into a gunrock graph,
// registers parameters, and runs BC once per source.
// FIX: return type widened float -> double; the accumulated elapsed time is
// a double (and the doc comment above promises double), so returning float
// silently truncated it. Callers receiving a float still convert implicitly.
template <typename VertexT = int, typename SizeT = int,
          typename GValueT = float, typename BCValueT = GValueT>
double bc(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets,
          const VertexT *col_indices, const int num_runs, VertexT *sources,
          BCValueT **bc_values, BCValueT **sigmas, VertexT **labels) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
                                           gunrock::graph::HAS_CSR>
      GraphT;
  typedef typename GraphT::CsrT CsrT;
  // Setup parameters
  gunrock::util::Parameters parameters("bc");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::bc::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  parameters.Set("num-runs", num_runs);
  std::vector<VertexT> srcs;
  for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
  parameters.Set("srcs", srcs);
  bool quiet = parameters.Get<bool>("quiet");
  GraphT graph;
  // Assign pointers into gunrock graph format (no copy; caller keeps
  // ownership of the CSR arrays).
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST);
  graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true);
  // Run BC
  double elapsed_time =
      gunrock_bc(parameters, graph, bc_values, sigmas, labels);
  // Cleanup
  graph.Release();
  srcs.clear();
  return elapsed_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] num_runs Number of runs to perform BC
* @param[in] sources Sources to begin traverse, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
/**
 * Single-source convenience overload: packages the scalar source and flat
 * output arrays into the one-run, array-of-pointers form expected by the
 * templated bc() and returns its accumulated elapsed time.
 */
double bc(int num_nodes, int num_edges, const int *row_offsets,
          const int *col_indices, int source, float *bc_values, float *sigmas,
          int *labels) {
  const int num_runs = 1;
  float *bc_value_slots[] = {bc_values};
  float *sigma_slots[]    = {sigmas};
  int   *label_slots[]    = {labels};
  return bc(num_nodes, num_edges, row_offsets, col_indices, num_runs,
            &source, bc_value_slots, sigma_slots, label_slots);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| bc_app.cu | // //
// ----------------------------------------------------------------------------
// // Gunrock -- Fast and Efficient GPU Graph Library
// //
// ----------------------------------------------------------------------------
// // This source code is distributed under the terms of LICENSE.TXT
// // in the root directory of this source distribution.
// //
// ----------------------------------------------------------------------------
// /**
// * @file bc_app.cu
// *
// * @brief Betweenness Centrality (BC) application
// */
#include <iostream>
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definations
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// betweenness centrality path includesls
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_test.cuh>
namespace gunrock {
namespace app {
namespace bc {
/**
 * @brief Registers the BC-specific command-line parameters ("src",
 * "src-seed") on top of the generic app / problem / enactor parameters.
 * @param[in,out] parameters Parameter container to extend
 * \return cudaError_t of the first failing registration, if any
 */
cudaError_t UseParameters(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));
  // "src" accepts multiple values; each run cycles through them.
  GUARD_CU(parameters.Use<std::string>(
      "src",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      "0",
      "<Vertex-ID|random|largestdegree> The source vertices\n"
      "\tIf random, randomly select non-zero degree vertices;\n"
      "\tIf largestdegree, select vertices with largest degrees",
      __FILE__, __LINE__));
  // Seed for the "random" source-selection mode above.
  GUARD_CU(parameters.Use<int>(
      "src-seed",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      util::PreDefinedValues<int>::InvalidValue,
      "seed to generate random sources", __FILE__, __LINE__));
  return retval;
}
/**
* @brief Run BC tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Where to perform the BC computation
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT,
          typename VertexT = typename GraphT::VertexT>
cudaError_t RunTests(util::Parameters &parameters, GraphT &graph,
                     ValueT **reference_bc_values = NULL,
                     ValueT **reference_sigmas = NULL,
                     VertexT **reference_labels = NULL,
                     util::Location target = util::DEVICE) {
  std::cout << "--- RunTests ---" << std::endl;
  cudaError_t retval = cudaSuccess;
  typedef typename GraphT::SizeT SizeT;
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();
  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  util::Info info("bc", parameters, graph);  // initialize Info structure
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();
  // Allocate host-side array (for both reference and GPU-computed results)
  ValueT *h_bc_values = new ValueT[graph.nodes];
  ValueT *h_sigmas = new ValueT[graph.nodes];
  VertexT *h_labels = new VertexT[graph.nodes];
  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
  // perform the algorithm; sources are cycled through across runs
  VertexT src;
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    auto run_index = run_num % num_srcs;
    src = srcs[run_index];
#if 1
    // src == -1 is a sentinel for "enact from every vertex".
    // NOTE(review): this assumes a signed VertexT -- confirm for unsigned
    // instantiations, where the comparison semantics change.
    if (src == -1) {
      for (src = 0 ; src < graph.nodes ; ++src) {
        GUARD_CU(problem.Reset(src, target));
        GUARD_CU(enactor.Reset(src, target));
        cpu_timer.Start();
        GUARD_CU(enactor.Enact(src));
        cpu_timer.Stop();
        info.CollectSingleRun(cpu_timer.ElapsedMillis());
      }
    } else {
      GUARD_CU(problem.Reset(src, target));
      GUARD_CU(enactor.Reset(src, target));
      cpu_timer.Start();
      GUARD_CU(enactor.Enact(src));
      cpu_timer.Stop();
      info.CollectSingleRun(cpu_timer.ElapsedMillis());
    }
#else
    GUARD_CU(problem.Reset(src, target));
    GUARD_CU(enactor.Reset(src, target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact(src));
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
#endif
    util::PrintMsg(
        "--------------------------\nRun " + std::to_string(run_num) +
            " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
            " ms, src = " + std::to_string(src) + ", #iterations = " +
            std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
        !quiet_mode);
    // Optionally validate after every run (validation == "each").
    if (validation == "each") {
      GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
      // Validation reports its own errors; num_errors is intentionally unused.
      SizeT num_errors = app::bc::Validate_Results(
          parameters, graph, src, h_bc_values, h_sigmas, h_labels,
          reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
          reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
          reference_labels == NULL ? NULL : reference_labels[run_index], true);
    }
  }
  cpu_timer.Start();
  // Copy out results
  GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
  if (validation == "last") {
    auto run_index = (num_runs - 1) % num_srcs;
    SizeT num_errors = app::bc::Validate_Results(
        parameters, graph, src, h_bc_values, h_sigmas, h_labels,
        reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
        reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
        reference_labels == NULL ? NULL : reference_labels[run_index], true);
  }
  // compute running statistics
  info.ComputeTraversalStats(enactor, h_labels);
  // Display_Memory_Usage(problem);
  // #ifdef ENABLE_PERFORMANCE_PROFILING
  //   Display_Performance_Profiling(&enactor);
  // #endif
  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_bc_values;
  h_bc_values = NULL;
  delete[] h_sigmas;
  h_sigmas = NULL;
  delete[] h_labels;
  h_labels = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace bc
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_bc function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the BC values
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_bc(gunrock::util::Parameters &parameters, GraphT &graph,
                  ValueT **bc_values, ValueT **sigmas,
                  typename GraphT::VertexT **labels) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::bc::Problem<GraphT> ProblemT;
  typedef gunrock::app::bc::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
  // Allocate problem and enactor on GPU, and initialize them.
  // NOTE(review): return codes of Init/Reset/Enact/Extract are ignored here,
  // unlike RunTests above -- errors will surface only downstream.
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, target);
  enactor.Init(problem, target);
  int num_runs = parameters.Get<int>("num-runs");
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    int src_num = run_num % num_srcs;
    VertexT src = srcs[src_num];
#if 1
    // src == -1 selects the all-sources mode (assumes signed VertexT).
    if (src == -1) {
      for (src = 0 ; src < graph.nodes ; ++src) {
        problem.Reset(src, target);
        enactor.Reset(src, target);
        cpu_timer.Start();
        enactor.Enact(src);
        cpu_timer.Stop();
        total_time += cpu_timer.ElapsedMillis();
      }
    } else {
      problem.Reset(src, target);
      enactor.Reset(src, target);
      cpu_timer.Start();
      enactor.Enact(src);
      cpu_timer.Stop();
      total_time += cpu_timer.ElapsedMillis();
    }
#else
    problem.Reset(src, target);
    enactor.Reset(src, target);
    cpu_timer.Start();
    enactor.Enact(src);
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();
#endif
    // Per-source output slot: results of run_num land in index src_num.
    problem.Extract(bc_values[src_num], sigmas[src_num], labels[src_num]);
  }
  enactor.Release(target);
  problem.Release(target);
  srcs.clear();
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform BC
* @param[in] sources Sources to begin traverse, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Templated CSR entry point: wraps the raw CSR arrays into a gunrock graph,
// registers parameters, and runs BC once per source.
// FIX: return type widened float -> double; the accumulated elapsed time is
// a double (and the doc comment above promises double), so returning float
// silently truncated it. Callers receiving a float still convert implicitly.
template <typename VertexT = int, typename SizeT = int,
          typename GValueT = float, typename BCValueT = GValueT>
double bc(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets,
          const VertexT *col_indices, const int num_runs, VertexT *sources,
          BCValueT **bc_values, BCValueT **sigmas, VertexT **labels) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
                                           gunrock::graph::HAS_CSR>
      GraphT;
  typedef typename GraphT::CsrT CsrT;
  // Setup parameters
  gunrock::util::Parameters parameters("bc");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::bc::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  parameters.Set("num-runs", num_runs);
  std::vector<VertexT> srcs;
  for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
  parameters.Set("srcs", srcs);
  bool quiet = parameters.Get<bool>("quiet");
  GraphT graph;
  // Assign pointers into gunrock graph format (no copy; caller keeps
  // ownership of the CSR arrays).
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST);
  graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true);
  // Run BC
  double elapsed_time =
      gunrock_bc(parameters, graph, bc_values, sigmas, labels);
  // Cleanup
  graph.Release();
  srcs.clear();
  return elapsed_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] num_runs Number of runs to perform BC
* @param[in] sources Sources to begin traverse, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
double bc(int num_nodes, int num_edges, const int *row_offsets,
          const int *col_indices, int source, float *bc_values, float *sigmas,
          int *labels) {
  // Convenience overload: forward to the templated implementation with a
  // single run starting at `source`.
  const int num_runs = 1;
  return bc(num_nodes, num_edges, row_offsets, col_indices, num_runs,
            &source, &bc_values, &sigmas, &labels);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
309d5bd600bd9a724fd4f478793b340a6e0266ff.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
/* Print the board to stdout, one row per line: '-' marks a dead cell
 * (value 0) and '0' marks a live cell (any non-zero value). */
void printBoard(unsigned char *board, int rows, int cols)
{
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            printf(board[i * cols + j] == 0 ? "-" : "0");
        }
        printf("\n");
    }
}
/* Conway's Game of Life: one cell per thread, advanced for `iterations`
 * generations in place. The board dimensions must exactly equal
 * gridDim * blockDim (there is no bounds check).
 *
 * NOTE(review): __syncthreads() only synchronizes one block, so cells on
 * block boundaries still race with neighboring blocks when iterations > 1
 * within a single launch; a complete fix needs one launch per generation
 * or a double-buffered board -- confirm intent with the host code. */
__global__ void life (unsigned char *d_board,int iterations) {
    int i,row,col,rows,cols;
    unsigned char state,neighbors;
    row = blockIdx.y * blockDim.y + threadIdx.y;
    col = blockIdx.x * blockDim.x + threadIdx.x;
    rows = gridDim.y * blockDim.y;
    cols = gridDim.x * blockDim.x;
    state = d_board[(row)*cols+(col)];
    for (i=0;i<iterations;i++) {
        /* Count live neighbors, skipping positions outside the board. */
        neighbors=0;
        if (row!=0) {
            if (col!=0) if (d_board[(row-1)*cols+(col-1)]==1) neighbors++;
            if (d_board[(row-1)*cols+(col)]==1) neighbors++;
            if (col!=(cols-1)) if (d_board[(row-1)*cols+(col+1)]==1) neighbors++;
        }
        if (col!=0) if (d_board[(row)*cols+(col-1)]==1) neighbors++;
        if (col!=(cols-1)) if (d_board[(row)*cols+(col+1)]==1) neighbors++;
        if (row!=(rows-1)) {
            if (col!=0) if (d_board[(row+1)*cols+(col-1)]==1) neighbors++;
            if (d_board[(row+1)*cols+(col)]==1) neighbors++;
            if (col!=(cols-1)) if (d_board[(row+1)*cols+(col+1)]==1) neighbors++;
        }
        if (neighbors<2) state = 0;        /* underpopulation */
        else if (neighbors==3) state = 1;  /* birth / survival */
        else if (neighbors>3) state = 0;   /* overpopulation; ==2 keeps state */
        /* All threads in the block must finish reading generation i
         * before anyone overwrites it. */
        __syncthreads();
        d_board[(row)*cols+(col)]=state;
        /* BUG FIX: a second barrier is required so the reads at the top of
         * generation i+1 do not race with the writes of generation i. */
        __syncthreads();
    }
}
/* Driver: fill a 256x256 board with random cells, run 100 generations of
 * Game of Life on the GPU, and print the start and end states.
 * BUG FIX: the original checked no HIP call, so allocation, copy, or launch
 * failures produced silently wrong output. */
int main () {
    dim3 gDim,bDim;
    unsigned char *h_board,*d_board;
    int i,iterations=100;
    hipError_t err;
    const size_t boardBytes = sizeof(unsigned char)*256*256;
    /* 8x16 blocks of 32x16 threads == one thread per cell of 256x256. */
    bDim.y=16;
    bDim.x=32;
    bDim.z=1;
    gDim.y=16;
    gDim.x=8;
    gDim.z=1;
    h_board=(unsigned char *)malloc(boardBytes);
    if (h_board==NULL) {
        fprintf(stderr,"Failed to allocate host board\n");
        return EXIT_FAILURE;
    }
    err = hipMalloc((void **)&d_board,boardBytes);
    if (err != hipSuccess) {
        fprintf(stderr,"hipMalloc failed: %s\n",hipGetErrorString(err));
        return EXIT_FAILURE;
    }
    /* Fixed seed keeps runs reproducible. */
    srand(0);
    for (i=0;i<256*256;i++) h_board[i]=rand()%2;
    printf("Starting state\n");
    printBoard(h_board, 256, 256);
    err = hipMemcpy(d_board,h_board,boardBytes,hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr,"host-to-device copy failed: %s\n",hipGetErrorString(err));
        return EXIT_FAILURE;
    }
    hipLaunchKernelGGL(( life) , dim3(gDim),dim3(bDim), 0, 0, d_board,iterations);
    /* Kernel launches are asynchronous and report no error directly. */
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr,"kernel launch failed: %s\n",hipGetErrorString(err));
        return EXIT_FAILURE;
    }
    /* This blocking copy also surfaces any execution error from the kernel. */
    err = hipMemcpy(h_board,d_board,boardBytes,hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        fprintf(stderr,"device-to-host copy failed: %s\n",hipGetErrorString(err));
        return EXIT_FAILURE;
    }
    printf("Ending state\n");
    printBoard(h_board, 256, 256);
    free(h_board);
    hipFree(d_board);
    return 0;
}
| 309d5bd600bd9a724fd4f478793b340a6e0266ff.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
/* Print the board to stdout, one row per line: '-' marks a dead cell
 * (value 0) and '0' marks a live cell (any non-zero value). */
void printBoard(unsigned char *board, int rows, int cols)
{
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            printf(board[i * cols + j] == 0 ? "-" : "0");
        }
        printf("\n");
    }
}
/* Conway's Game of Life: one cell per thread, advanced for `iterations`
 * generations in place. The board dimensions must exactly equal
 * gridDim * blockDim (there is no bounds check).
 *
 * NOTE(review): __syncthreads() only synchronizes one block, so cells on
 * block boundaries still race with neighboring blocks when iterations > 1
 * within a single launch; a complete fix needs one launch per generation
 * or a double-buffered board -- confirm intent with the host code. */
__global__ void life (unsigned char *d_board,int iterations) {
    int i,row,col,rows,cols;
    unsigned char state,neighbors;
    row = blockIdx.y * blockDim.y + threadIdx.y;
    col = blockIdx.x * blockDim.x + threadIdx.x;
    rows = gridDim.y * blockDim.y;
    cols = gridDim.x * blockDim.x;
    state = d_board[(row)*cols+(col)];
    for (i=0;i<iterations;i++) {
        /* Count live neighbors, skipping positions outside the board. */
        neighbors=0;
        if (row!=0) {
            if (col!=0) if (d_board[(row-1)*cols+(col-1)]==1) neighbors++;
            if (d_board[(row-1)*cols+(col)]==1) neighbors++;
            if (col!=(cols-1)) if (d_board[(row-1)*cols+(col+1)]==1) neighbors++;
        }
        if (col!=0) if (d_board[(row)*cols+(col-1)]==1) neighbors++;
        if (col!=(cols-1)) if (d_board[(row)*cols+(col+1)]==1) neighbors++;
        if (row!=(rows-1)) {
            if (col!=0) if (d_board[(row+1)*cols+(col-1)]==1) neighbors++;
            if (d_board[(row+1)*cols+(col)]==1) neighbors++;
            if (col!=(cols-1)) if (d_board[(row+1)*cols+(col+1)]==1) neighbors++;
        }
        if (neighbors<2) state = 0;        /* underpopulation */
        else if (neighbors==3) state = 1;  /* birth / survival */
        else if (neighbors>3) state = 0;   /* overpopulation; ==2 keeps state */
        /* All threads in the block must finish reading generation i
         * before anyone overwrites it. */
        __syncthreads();
        d_board[(row)*cols+(col)]=state;
        /* BUG FIX: a second barrier is required so the reads at the top of
         * generation i+1 do not race with the writes of generation i. */
        __syncthreads();
    }
}
/* Driver: fill a 256x256 board with random cells, run 100 generations of
 * Game of Life on the GPU, and print the start and end states.
 * BUG FIX: the original checked no CUDA call, so allocation, copy, or launch
 * failures produced silently wrong output. */
int main () {
    dim3 gDim,bDim;
    unsigned char *h_board,*d_board;
    int i,iterations=100;
    cudaError_t err;
    const size_t boardBytes = sizeof(unsigned char)*256*256;
    /* 8x16 blocks of 32x16 threads == one thread per cell of 256x256. */
    bDim.y=16;
    bDim.x=32;
    bDim.z=1;
    gDim.y=16;
    gDim.x=8;
    gDim.z=1;
    h_board=(unsigned char *)malloc(boardBytes);
    if (h_board==NULL) {
        fprintf(stderr,"Failed to allocate host board\n");
        return EXIT_FAILURE;
    }
    err = cudaMalloc((void **)&d_board,boardBytes);
    if (err != cudaSuccess) {
        fprintf(stderr,"cudaMalloc failed: %s\n",cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    /* Fixed seed keeps runs reproducible. */
    srand(0);
    for (i=0;i<256*256;i++) h_board[i]=rand()%2;
    printf("Starting state\n");
    printBoard(h_board, 256, 256);
    err = cudaMemcpy(d_board,h_board,boardBytes,cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr,"host-to-device copy failed: %s\n",cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    life <<<gDim,bDim>>> (d_board,iterations);
    /* Kernel launches are asynchronous and report no error directly. */
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr,"kernel launch failed: %s\n",cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    /* This blocking copy also surfaces any execution error from the kernel. */
    err = cudaMemcpy(h_board,d_board,boardBytes,cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr,"device-to-host copy failed: %s\n",cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    printf("Ending state\n");
    printBoard(h_board, 256, 256);
    free(h_board);
    cudaFree(d_board);
    return 0;
}
26d4d9788bf0517cd065ab266df62a7cc9af732a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "constants.h"
/* Assignment step of 1-D k-means: each thread assigns one pixel to its
 * nearest centroid, then per-cluster partial sums/counts are reduced
 * within the block and accumulated into per-(cluster, block) slots of
 * `accumulator` / `numPixelsCentroid` (each sized means*BLOCKS_PER_GRID).
 * Assumes the image size is an exact multiple of the block size -- there
 * is no bounds check on `index` (TODO confirm with the caller). */
__global__ void k_means_kernel_assignment_opt(const float *imageIn, float *cluster, float *centroids, const int means,float *accumulator,float *numPixelsCentroid)
{
    __shared__ float partialAccumulator[CLUSTERS][THREADS_PER_BLOCK];
    __shared__ float partialNumPixelsCentroid[CLUSTERS][THREADS_PER_BLOCK];
    float min_temp = BIGNUM;
    float distance;
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int threadNum = threadIdx.x;
    int j,k;
    /* BUG FIX: shared memory is uninitialized. Every cluster row of this
     * thread's column must be zeroed first, otherwise the reduction below
     * sums garbage for the clusters this pixel does not belong to. */
    for (j = 0; j < CLUSTERS; j++) {
        partialAccumulator[j][threadNum] = 0;
        partialNumPixelsCentroid[j][threadNum] = 0;
    }
    /* Assign this pixel to the centroid with the smallest 1-D distance. */
    for (j = 0; j <means;j++) {
        distance = fabs(centroids[j]-imageIn[index]);
        if (distance<min_temp){
            min_temp = distance;
            cluster[index]= j;
        }
    }
    int temp1 = (int)cluster[index];
    partialAccumulator[temp1][threadNum] = imageIn[index];
    partialNumPixelsCentroid[temp1][threadNum] = 1;
    /* All threads must have published their contribution before reducing. */
    __syncthreads();
    /* One thread per cluster serially reduces its row across the block and
     * accumulates into this block's global slot. */
    if ( threadNum< CLUSTERS){
        for (k=1; k<THREADS_PER_BLOCK;k++){
            partialAccumulator[threadNum][0] += partialAccumulator[threadNum][k];
            partialNumPixelsCentroid[threadNum][0] += partialNumPixelsCentroid[threadNum][k];
        }
        /* BUG FIX: the original placed a __syncthreads() here, inside this
         * divergent branch (threads with threadNum >= CLUSTERS never reach
         * it), which is undefined behavior. No barrier is needed before the
         * per-block global write below. */
        accumulator[threadNum*(int)BLOCKS_PER_GRID+blockIdx.x] += partialAccumulator[threadNum][0];
        numPixelsCentroid[threadNum*(int)BLOCKS_PER_GRID+blockIdx.x] += partialNumPixelsCentroid[threadNum][0];
    }
}
/* Update step of 1-D k-means, launched with one thread per centroid:
 * fold the per-block partial sums into slot 0, recompute the centroid as
 * (pixel sum / pixel count), and clear all partials for the next pass.
 * imageIn, cluster and numElements are unused but kept for the caller's
 * launch signature. */
__global__ void k_means_kernel_update_opt(const float *imageIn, float *cluster,float *centroids,float *accumulator,float *numPixelsCentroid, int numElements)
{
    const int c = threadIdx.x;                     /* centroid handled here */
    const int base = c * (int)BLOCKS_PER_GRID;     /* first slot of its row */
    for (int b = 1; b < (int)BLOCKS_PER_GRID; b++) {
        accumulator[base] += accumulator[base + b];
        numPixelsCentroid[base] += numPixelsCentroid[base + b];
        accumulator[base + b] = 0;
        numPixelsCentroid[base + b] = 0;
    }
    /* Leave the centroid unchanged when no pixel was assigned to it. */
    if (numPixelsCentroid[base] != 0) {
        centroids[c] = accumulator[base] / numPixelsCentroid[base];
    }
    numPixelsCentroid[base] = 0;
    accumulator[base] = 0;
}
/* Write-back step: replace each pixel with the value of the centroid its
 * cluster assignment points at. One thread per pixel; no bounds check. */
__global__ void k_means_kernel_writeBack_opt(float *imageOut, const float *imageIn, const float *centroids, const float *cluster)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    imageOut[idx] = centroids[(int)cluster[idx]];
}
/* 1-D k-means quantization of a dimension x dimension float image on the
 * GPU. Returns a newly malloc'd quantized image (caller owns the buffer)
 * and prints the final centroids and the internal GPU time. */
float* k_means_parallel_optimized(float *imageIn, int clusters, int dimension, int iterations){
    struct timespec diff(struct timespec start, struct timespec end);
    struct timespec timeStart, timeEnd;
    hipError_t err = hipSuccess;

    /* Local helper: abort with a descriptive message on any HIP failure.
     * Replaces the original's copy-pasted (and often mislabeled) checks. */
#define KMEANS_HIP_CHECK(call, what)                                        \
    do {                                                                    \
        err = (call);                                                       \
        if (err != hipSuccess) {                                            \
            fprintf(stderr, "Failed to %s (error code %s)!\n", (what),      \
                    hipGetErrorString(err));                                \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

    const int numElements = dimension * dimension;
    const size_t size  = (size_t)numElements * sizeof(float);
    const int means = clusters;
    const size_t size2 = (size_t)means * sizeof(float);
    /* One partial-sum slot per (centroid, block) pair. */
    const size_t size3 = size2 * (size_t)((numElements + 256 - 1) / 256);

    float *cluster  = (float*) malloc(size);           /* centroid id per pixel */
    float *imageOut = (float*) malloc(size);           /* quantized output image */
    float *centroids = (float*) malloc(size2);         /* current centroid values */
    float *accumulator = (float*) malloc(size3);       /* per-block pixel sums */
    float *numPixelsCentroid = (float*) malloc(size3); /* per-block pixel counts */
    if (!cluster || !imageOut || !centroids || !accumulator || !numPixelsCentroid) {
        fprintf(stderr, "Failed to allocate host buffers!\n");
        exit(EXIT_FAILURE);
    }

    /* BUG FIX: `255/(means-1)` was integer division, truncating the centroid
     * spacing (e.g. 255/9 == 28 instead of 28.33); also guard means == 1. */
    const float range = (means > 1) ? 255.0f / (float)(means - 1) : 0.0f;
    for (int m = 0; m < means; m++) {
        centroids[m] = range * m;
    }
    /* BUG FIX: the partial arrays hold means*BLOCKS_PER_GRID entries but the
     * original zeroed only the first `means`, so the device copies started
     * with uninitialized memory that the kernels accumulated into. */
    const size_t numPartials = size3 / sizeof(float);
    for (size_t p = 0; p < numPartials; p++) {
        accumulator[p] = 0;
        numPixelsCentroid[p] = 0;
    }

    /* Allocate device buffers. */
    float *d_imageIn = NULL, *d_imageOut = NULL, *d_cluster = NULL;
    float *d_centroids = NULL, *d_accumulator = NULL, *d_numPixelsCentroid = NULL;
    KMEANS_HIP_CHECK(hipMalloc((void**)&d_imageIn, size), "allocate device vector imageIn");
    KMEANS_HIP_CHECK(hipMalloc((void**)&d_imageOut, size), "allocate device vector imageOut");
    KMEANS_HIP_CHECK(hipMalloc((void**)&d_cluster, size), "allocate device vector cluster");
    KMEANS_HIP_CHECK(hipMalloc((void**)&d_centroids, size2), "allocate device vector centroids");
    KMEANS_HIP_CHECK(hipMalloc((void**)&d_accumulator, size3), "allocate device vector accumulator");
    KMEANS_HIP_CHECK(hipMalloc((void**)&d_numPixelsCentroid, size3), "allocate device vector numPixelsCentroid");

    /* Copy inputs and zeroed work buffers to the device. */
    printf("Copy input data from the host memory to the CUDA device \n");
    KMEANS_HIP_CHECK(hipMemcpy(d_imageIn, imageIn, size, hipMemcpyHostToDevice), "copy vector imageIn from host to device");
    KMEANS_HIP_CHECK(hipMemcpy(d_imageOut, imageOut, size, hipMemcpyHostToDevice), "copy vector imageOut from host to device");
    KMEANS_HIP_CHECK(hipMemcpy(d_cluster, cluster, size, hipMemcpyHostToDevice), "copy vector cluster from host to device");
    KMEANS_HIP_CHECK(hipMemcpy(d_centroids, centroids, size2, hipMemcpyHostToDevice), "copy vector centroids from host to device");
    KMEANS_HIP_CHECK(hipMemcpy(d_accumulator, accumulator, size3, hipMemcpyHostToDevice), "copy vector accumulator from host to device");
    KMEANS_HIP_CHECK(hipMemcpy(d_numPixelsCentroid, numPixelsCentroid, size3, hipMemcpyHostToDevice), "copy vector numPixelsCentroid from host to device");

    /* Launch configuration. */
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + 256 - 1) / 256;
    dim3 dimBlock(THREADS_PER_BLOCK);
    /* BUG FIX: the original launched dim3(BLOCKS_PER_GRID, CLUSTERS, 1), but
     * the assignment kernel never reads blockIdx.y, so the extra CLUSTERS
     * block rows repeated the same work and raced on the non-atomic "+="
     * accumulation. A 1-D grid covers every pixel exactly once. */
    dim3 dimGrid(BLOCKS_PER_GRID, 1, 1);

    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timeStart);
    for (int iters = 0; iters < iterations; iters++) {
        /* Assignment step, then centroid update step. */
        hipLaunchKernelGGL(( k_means_kernel_assignment_opt), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageIn, d_cluster, d_centroids, means, d_accumulator,d_numPixelsCentroid);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( k_means_kernel_update_opt), dim3(1),dim3(means), 0, 0, d_imageIn, d_cluster,d_centroids, d_accumulator,d_numPixelsCentroid,numElements);
        hipDeviceSynchronize();
    }
    hipLaunchKernelGGL(( k_means_kernel_writeBack_opt), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_imageOut, d_imageIn, d_centroids, d_cluster);
    hipDeviceSynchronize();
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timeEnd);
    KMEANS_HIP_CHECK(hipGetLastError(), "launch kmeans kernel");

    printf("Internal GPU time: %ld \n", (long int)(((double)GIG *diff(timeStart,timeEnd).tv_sec + diff(timeStart,timeEnd).tv_nsec)));

    /* Copy the results back to the host. */
    printf("Copy output data from CUDA device to the host memory \n");
    KMEANS_HIP_CHECK(hipMemcpy(centroids, d_centroids, size2, hipMemcpyDeviceToHost), "copy vector centroids from device to host");
    KMEANS_HIP_CHECK(hipMemcpy(imageOut, d_imageOut, size, hipMemcpyDeviceToHost), "copy vector imageOut from device to host");

    /* Free device global memory. */
    KMEANS_HIP_CHECK(hipFree(d_imageOut), "free device vector imageOut");
    KMEANS_HIP_CHECK(hipFree(d_centroids), "free device vector centroids");
    KMEANS_HIP_CHECK(hipFree(d_accumulator), "free device vector accumulator");
    KMEANS_HIP_CHECK(hipFree(d_numPixelsCentroid), "free device vector numPixelsCentroid");
    KMEANS_HIP_CHECK(hipFree(d_imageIn), "free device vector imageIn");
    KMEANS_HIP_CHECK(hipFree(d_cluster), "free device vector cluster");

    /* Reset the device. */
    KMEANS_HIP_CHECK(hipDeviceReset(), "deinitialize the device");

    /* Report the final centroids. */
    for (int m = 0; m < means; m++) {
        printf("%f \n", centroids[m]);
    }

    /* BUG FIX: the original leaked every host scratch buffer. */
    free(cluster);
    free(centroids);
    free(accumulator);
    free(numPixelsCentroid);
#undef KMEANS_HIP_CHECK
    return imageOut;
}
| 26d4d9788bf0517cd065ab266df62a7cc9af732a.cu | #include <stdio.h>
#include <stdlib.h>
#include "constants.h"
/* Assignment step of 1-D k-means: each thread assigns one pixel to its
 * nearest centroid, then per-cluster partial sums/counts are reduced
 * within the block and accumulated into per-(cluster, block) slots of
 * `accumulator` / `numPixelsCentroid` (each sized means*BLOCKS_PER_GRID).
 * Assumes the image size is an exact multiple of the block size -- there
 * is no bounds check on `index` (TODO confirm with the caller). */
__global__ void k_means_kernel_assignment_opt(const float *imageIn, float *cluster, float *centroids, const int means,float *accumulator,float *numPixelsCentroid)
{
    __shared__ float partialAccumulator[CLUSTERS][THREADS_PER_BLOCK];
    __shared__ float partialNumPixelsCentroid[CLUSTERS][THREADS_PER_BLOCK];
    float min_temp = BIGNUM;
    float distance;
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int threadNum = threadIdx.x;
    int j,k;
    /* BUG FIX: shared memory is uninitialized. Every cluster row of this
     * thread's column must be zeroed first, otherwise the reduction below
     * sums garbage for the clusters this pixel does not belong to. */
    for (j = 0; j < CLUSTERS; j++) {
        partialAccumulator[j][threadNum] = 0;
        partialNumPixelsCentroid[j][threadNum] = 0;
    }
    /* Assign this pixel to the centroid with the smallest 1-D distance. */
    for (j = 0; j <means;j++) {
        distance = fabs(centroids[j]-imageIn[index]);
        if (distance<min_temp){
            min_temp = distance;
            cluster[index]= j;
        }
    }
    int temp1 = (int)cluster[index];
    partialAccumulator[temp1][threadNum] = imageIn[index];
    partialNumPixelsCentroid[temp1][threadNum] = 1;
    /* All threads must have published their contribution before reducing. */
    __syncthreads();
    /* One thread per cluster serially reduces its row across the block and
     * accumulates into this block's global slot. */
    if ( threadNum< CLUSTERS){
        for (k=1; k<THREADS_PER_BLOCK;k++){
            partialAccumulator[threadNum][0] += partialAccumulator[threadNum][k];
            partialNumPixelsCentroid[threadNum][0] += partialNumPixelsCentroid[threadNum][k];
        }
        /* BUG FIX: the original placed a __syncthreads() here, inside this
         * divergent branch (threads with threadNum >= CLUSTERS never reach
         * it), which is undefined behavior. No barrier is needed before the
         * per-block global write below. */
        accumulator[threadNum*(int)BLOCKS_PER_GRID+blockIdx.x] += partialAccumulator[threadNum][0];
        numPixelsCentroid[threadNum*(int)BLOCKS_PER_GRID+blockIdx.x] += partialNumPixelsCentroid[threadNum][0];
    }
}
/* Update step of 1-D k-means, launched with one thread per centroid:
 * fold the per-block partial sums into slot 0, recompute the centroid as
 * (pixel sum / pixel count), and clear all partials for the next pass.
 * imageIn, cluster and numElements are unused but kept for the caller's
 * launch signature. */
__global__ void k_means_kernel_update_opt(const float *imageIn, float *cluster,float *centroids,float *accumulator,float *numPixelsCentroid, int numElements)
{
    const int c = threadIdx.x;                     /* centroid handled here */
    const int base = c * (int)BLOCKS_PER_GRID;     /* first slot of its row */
    for (int b = 1; b < (int)BLOCKS_PER_GRID; b++) {
        accumulator[base] += accumulator[base + b];
        numPixelsCentroid[base] += numPixelsCentroid[base + b];
        accumulator[base + b] = 0;
        numPixelsCentroid[base + b] = 0;
    }
    /* Leave the centroid unchanged when no pixel was assigned to it. */
    if (numPixelsCentroid[base] != 0) {
        centroids[c] = accumulator[base] / numPixelsCentroid[base];
    }
    numPixelsCentroid[base] = 0;
    accumulator[base] = 0;
}
/* Write-back step: replace each pixel with the value of the centroid its
 * cluster assignment points at. One thread per pixel; no bounds check. */
__global__ void k_means_kernel_writeBack_opt(float *imageOut, const float *imageIn, const float *centroids, const float *cluster)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    imageOut[idx] = centroids[(int)cluster[idx]];
}
/* 1-D k-means quantization of a dimension x dimension float image on the
 * GPU. Returns a newly malloc'd quantized image (caller owns the buffer)
 * and prints the final centroids and the internal GPU time. */
float* k_means_parallel_optimized(float *imageIn, int clusters, int dimension, int iterations){
    struct timespec diff(struct timespec start, struct timespec end);
    struct timespec timeStart, timeEnd;
    cudaError_t err = cudaSuccess;

    /* Local helper: abort with a descriptive message on any CUDA failure.
     * Replaces the original's copy-pasted (and often mislabeled) checks. */
#define KMEANS_CUDA_CHECK(call, what)                                       \
    do {                                                                    \
        err = (call);                                                       \
        if (err != cudaSuccess) {                                           \
            fprintf(stderr, "Failed to %s (error code %s)!\n", (what),      \
                    cudaGetErrorString(err));                               \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

    const int numElements = dimension * dimension;
    const size_t size  = (size_t)numElements * sizeof(float);
    const int means = clusters;
    const size_t size2 = (size_t)means * sizeof(float);
    /* One partial-sum slot per (centroid, block) pair. */
    const size_t size3 = size2 * (size_t)((numElements + 256 - 1) / 256);

    float *cluster  = (float*) malloc(size);           /* centroid id per pixel */
    float *imageOut = (float*) malloc(size);           /* quantized output image */
    float *centroids = (float*) malloc(size2);         /* current centroid values */
    float *accumulator = (float*) malloc(size3);       /* per-block pixel sums */
    float *numPixelsCentroid = (float*) malloc(size3); /* per-block pixel counts */
    if (!cluster || !imageOut || !centroids || !accumulator || !numPixelsCentroid) {
        fprintf(stderr, "Failed to allocate host buffers!\n");
        exit(EXIT_FAILURE);
    }

    /* BUG FIX: `255/(means-1)` was integer division, truncating the centroid
     * spacing (e.g. 255/9 == 28 instead of 28.33); also guard means == 1. */
    const float range = (means > 1) ? 255.0f / (float)(means - 1) : 0.0f;
    for (int m = 0; m < means; m++) {
        centroids[m] = range * m;
    }
    /* BUG FIX: the partial arrays hold means*BLOCKS_PER_GRID entries but the
     * original zeroed only the first `means`, so the device copies started
     * with uninitialized memory that the kernels accumulated into. */
    const size_t numPartials = size3 / sizeof(float);
    for (size_t p = 0; p < numPartials; p++) {
        accumulator[p] = 0;
        numPixelsCentroid[p] = 0;
    }

    /* Allocate device buffers. */
    float *d_imageIn = NULL, *d_imageOut = NULL, *d_cluster = NULL;
    float *d_centroids = NULL, *d_accumulator = NULL, *d_numPixelsCentroid = NULL;
    KMEANS_CUDA_CHECK(cudaMalloc((void**)&d_imageIn, size), "allocate device vector imageIn");
    KMEANS_CUDA_CHECK(cudaMalloc((void**)&d_imageOut, size), "allocate device vector imageOut");
    KMEANS_CUDA_CHECK(cudaMalloc((void**)&d_cluster, size), "allocate device vector cluster");
    KMEANS_CUDA_CHECK(cudaMalloc((void**)&d_centroids, size2), "allocate device vector centroids");
    KMEANS_CUDA_CHECK(cudaMalloc((void**)&d_accumulator, size3), "allocate device vector accumulator");
    KMEANS_CUDA_CHECK(cudaMalloc((void**)&d_numPixelsCentroid, size3), "allocate device vector numPixelsCentroid");

    /* Copy inputs and zeroed work buffers to the device. */
    printf("Copy input data from the host memory to the CUDA device \n");
    KMEANS_CUDA_CHECK(cudaMemcpy(d_imageIn, imageIn, size, cudaMemcpyHostToDevice), "copy vector imageIn from host to device");
    KMEANS_CUDA_CHECK(cudaMemcpy(d_imageOut, imageOut, size, cudaMemcpyHostToDevice), "copy vector imageOut from host to device");
    KMEANS_CUDA_CHECK(cudaMemcpy(d_cluster, cluster, size, cudaMemcpyHostToDevice), "copy vector cluster from host to device");
    KMEANS_CUDA_CHECK(cudaMemcpy(d_centroids, centroids, size2, cudaMemcpyHostToDevice), "copy vector centroids from host to device");
    KMEANS_CUDA_CHECK(cudaMemcpy(d_accumulator, accumulator, size3, cudaMemcpyHostToDevice), "copy vector accumulator from host to device");
    KMEANS_CUDA_CHECK(cudaMemcpy(d_numPixelsCentroid, numPixelsCentroid, size3, cudaMemcpyHostToDevice), "copy vector numPixelsCentroid from host to device");

    /* Launch configuration. */
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + 256 - 1) / 256;
    dim3 dimBlock(THREADS_PER_BLOCK);
    /* BUG FIX: the original launched dim3(BLOCKS_PER_GRID, CLUSTERS, 1), but
     * the assignment kernel never reads blockIdx.y, so the extra CLUSTERS
     * block rows repeated the same work and raced on the non-atomic "+="
     * accumulation. A 1-D grid covers every pixel exactly once. */
    dim3 dimGrid(BLOCKS_PER_GRID, 1, 1);

    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timeStart);
    for (int iters = 0; iters < iterations; iters++) {
        /* Assignment step, then centroid update step.
         * (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is
         * the supported equivalent.) */
        k_means_kernel_assignment_opt<<<dimGrid,dimBlock>>>(d_imageIn, d_cluster, d_centroids, means, d_accumulator,d_numPixelsCentroid);
        cudaDeviceSynchronize();
        k_means_kernel_update_opt<<<1,means>>>(d_imageIn, d_cluster,d_centroids, d_accumulator,d_numPixelsCentroid,numElements);
        cudaDeviceSynchronize();
    }
    k_means_kernel_writeBack_opt<<<blocksPerGrid,threadsPerBlock>>>(d_imageOut, d_imageIn, d_centroids, d_cluster);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timeEnd);
    KMEANS_CUDA_CHECK(cudaGetLastError(), "launch kmeans kernel");

    printf("Internal GPU time: %ld \n", (long int)(((double)GIG *diff(timeStart,timeEnd).tv_sec + diff(timeStart,timeEnd).tv_nsec)));

    /* Copy the results back to the host. */
    printf("Copy output data from CUDA device to the host memory \n");
    KMEANS_CUDA_CHECK(cudaMemcpy(centroids, d_centroids, size2, cudaMemcpyDeviceToHost), "copy vector centroids from device to host");
    KMEANS_CUDA_CHECK(cudaMemcpy(imageOut, d_imageOut, size, cudaMemcpyDeviceToHost), "copy vector imageOut from device to host");

    /* Free device global memory. */
    KMEANS_CUDA_CHECK(cudaFree(d_imageOut), "free device vector imageOut");
    KMEANS_CUDA_CHECK(cudaFree(d_centroids), "free device vector centroids");
    KMEANS_CUDA_CHECK(cudaFree(d_accumulator), "free device vector accumulator");
    KMEANS_CUDA_CHECK(cudaFree(d_numPixelsCentroid), "free device vector numPixelsCentroid");
    KMEANS_CUDA_CHECK(cudaFree(d_imageIn), "free device vector imageIn");
    KMEANS_CUDA_CHECK(cudaFree(d_cluster), "free device vector cluster");

    /* Reset the device. */
    KMEANS_CUDA_CHECK(cudaDeviceReset(), "deinitialize the device");

    /* Report the final centroids. */
    for (int m = 0; m < means; m++) {
        printf("%f \n", centroids[m]);
    }

    /* BUG FIX: the original leaked every host scratch buffer. */
    free(cluster);
    free(centroids);
    free(accumulator);
    free(numPixelsCentroid);
#undef KMEANS_CUDA_CHECK
    return imageOut;
}
|
23dc9c8a82a0881a77e22d10410bba241fbef318.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_TOP_K_CATEGORICAL_ACCURACY_LAYER_INSTANTIATE
#include "lbann/layers/loss/top_k_categorical_accuracy.hpp"
#include "lbann/utils/cuda.hpp"
#include "lbann/utils/exception.hpp"
#include <thrust/sort.h>
#include <thrust/iterator/discard_iterator.h>
namespace lbann {
namespace {
/** Sparse vector entry: a value paired with its global row index. */
template <typename TensorDataType>
struct entry {
/** Vector entry value. */
TensorDataType value;
/** Vector entry index (global row index in the source matrix). */
El::Int index;
};
/** Comparison operation to sort sparse vector entries.
* Entries are sorted by value in decreasing order, with ties broken
* in favor of entries with smaller indices.
*/
template <typename TensorDataType>
struct entry_compare : ::thrust::binary_function<entry<TensorDataType>,entry<TensorDataType>,bool> {
  // "Less-than" for a descending sort by value; ties order by ascending
  // index so the sort result is deterministic.
  __host__ __device__ bool operator()(const entry<TensorDataType>& a, const entry<TensorDataType>& b) const {
    if (a.value != b.value) { return a.value > b.value; }
    return a.index < b.index;
  }
};
/** Convert columns of a dense matrix into sparse vectors.
* The matrix and vectors are both distributed, so entry indices in
* the sparse vectors correspond to global row indices in the dense
* matrix.
*/
template <typename TensorDataType>
__global__ void dense_matrix_to_sparse_vectors(El::Int local_vector_size,
El::Int local_matrix_height,
El::Int local_matrix_width,
El::Int global_matrix_height,
El::Int global_matrix_col_shift,
El::Int global_matrix_col_stride,
const TensorDataType* __restrict__ local_matrix,
El::Int local_matrix_ldim,
entry<TensorDataType>* __restrict__ local_entries,
El::Int local_entries_ldim) {
// Grid-stride loop over every (row, column) slot of the output entries.
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int num_local_entries = local_vector_size * local_matrix_width;
for (El::Int i = gid; i < num_local_entries; i += num_threads) {
const auto& local_row = i % local_vector_size;
const auto& local_col = i / local_vector_size;
auto& current_entry = local_entries[local_row + local_col * local_entries_ldim];
if (local_row < local_matrix_height) {
// Real matrix entry: record its value and its global row index
// (reconstructed from the distributed column shift/stride).
const auto& global_row = (global_matrix_col_shift
+ local_row * global_matrix_col_stride);
current_entry.value = local_matrix[local_row + local_col * local_matrix_ldim];
current_entry.index = global_row;
} else {
// Padding slot (local_vector_size may exceed the local matrix height):
// fill with -inf so it sorts after every real entry, and use an
// out-of-range index (== global height) as a sentinel.
current_entry.value = -cuda::infinity<TensorDataType>();
current_entry.index = global_matrix_height;
}
}
}
/** Fill an array with a corresponding tensor index.
* Consider a d(1) x d(2) x ... x d(n) tensor with entry indices
* denoted with (i(1), ..., i(n)). This tensor is contiguous in
* memory with d(1) as the most major dimension and d(n) as the most
* minor (e.g. d(1) is the width and d(2) is the height for a
* column-major matrix). Given some k, this kernel sets each entry in
* the tensor to i(k). Using this notation:
* tensor_size = d(1) * ... * d(n)
* dim = d(k)
* dim_stride = d(k+1) * ... * d(n)
*/
__global__ void fill_with_tensor_index(El::Int tensor_size,
                                       El::Int dim,
                                       El::Int dim_stride,
                                       El::Int* tensor) {
  // Grid-stride loop: entry i receives its index along the dimension of
  // size `dim`, i.e. (i / dim_stride) mod dim.
  const El::Int start = threadIdx.x + blockIdx.x * blockDim.x;
  const El::Int stride = blockDim.x * gridDim.x;
  for (El::Int i = start; i < tensor_size; i += stride) {
    tensor[i] = (i / dim_stride) % dim;
  }
}
/** Get indices corresponding to one-hot matrix.
* Each column of the input matrix is interpreted as a one-hot
* vector. Note that we may get race conditions if a matrix column is
* not a one-hot vector.
*/
template <typename TensorDataType>
__global__ void one_hot_matrix_to_indices(El::Int local_height,
El::Int local_width,
El::Int global_matrix_col_shift,
El::Int global_matrix_col_stride,
const TensorDataType* __restrict__ local_matrix,
El::Int local_matrix_ldim,
El::Int* __restrict__ indices) {
// Grid-stride loop over every local matrix entry.
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int local_size = local_height * local_width;
for (El::Int i = gid; i < local_size; i += num_threads) {
const auto& local_row = i % local_height;
const auto& local_col = i / local_height;
if (local_matrix[local_row + local_col * local_matrix_ldim] > TensorDataType(0.0)) {
// Positive entry found: record its global row index for this column.
// If a column has more than one positive entry, concurrent writes race
// and the surviving value is unspecified (see the kernel's doc above).
const auto& global_row = (global_matrix_col_shift
+ local_row * global_matrix_col_stride);
indices[local_col] = global_row;
}
}
}
/** Compute categorical accuracy for each matrix column.
* Loss is one if the label index matches one of the top-k entries
* and is otherwise zero.
*/
template <typename TensorDataType>
__global__ void compute_categorical_accuracy(El::Int k,
El::Int width,
El::Int max_entry,
const entry<TensorDataType>* __restrict__ top_entries,
El::Int top_entries_ldim,
const El::Int* __restrict__ label_indices,
TensorDataType* __restrict__ loss,
El::Int loss_stride) {
// Grid-stride loop over all (top-k slot, column) pairs. The kernel only
// ever writes 1 -- presumably the caller zero-initializes `loss` first;
// TODO confirm against the (off-screen) call site.
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int num_entries = width * k;
for (El::Int i = gid; i < num_entries; i += num_threads) {
const auto& ind = i % k;      // which of the k top slots
const auto& col = i / k;      // which sample/column
const auto& label_index = label_indices[col];
// A match in any slot marks the column correct; the max_entry bound
// filters out sentinel/out-of-range label indices.
if (top_entries[ind + col * top_entries_ldim].index == label_index
&& label_index <= max_entry) {
loss[col * loss_stride] = TensorDataType(1.0);
}
}
}
/** GPU implementation of top-k categorical accuracy layer forward prop.
 *
 *  For each mini-batch column, writes loss = 1 if the sample's label
 *  index is among the k largest prediction entries and loss = 0
 *  otherwise.  Each rank extracts its local top-k candidates, the
 *  column root gathers them and re-selects the global top k, then
 *  scores the labels.
 *
 *  @param comm         LBANN communicator used for the gather.
 *  @param k            Number of top entries to consider.
 *  @param predictions  Prediction scores (one column per sample).
 *  @param labels       One-hot label matrix (same distribution as predictions).
 *  @param loss         Output matrix; row 0 of each column receives the result.
 */
template <typename TensorDataType>
void fp_gpu(lbann_comm& comm,
            El::Int k,
            const El::AbstractDistMatrix<TensorDataType>& predictions,
            const El::AbstractDistMatrix<TensorDataType>& labels,
            El::AbstractDistMatrix<TensorDataType>& loss) {

  // Local matrices
  const auto& local_predictions = predictions.LockedMatrix();
  const auto& local_labels = labels.LockedMatrix();
  auto& local_loss = loss.Matrix();
  const auto& height = predictions.Height();
  const auto& local_height = local_predictions.Height();
  const auto& local_width = local_predictions.Width();

  // Trivial cases: k < 1 matches nothing, k >= height matches
  // everything, and ranks without local columns have no work.
  if (k < 1) {
    El::Zero(loss);
    return;
  } else if (k >= height) {
    El::Fill(loss, El::TypeTraits<TensorDataType>::One());
    return;
  } else if (local_width < 1) {
    return;
  }

  // Column communicator
  auto&& col_comm = predictions.ColComm();
  const auto& col_comm_rank = El::mpi::Rank(col_comm);
  const auto& col_comm_size = El::mpi::Size(col_comm);
  const auto& col_comm_root = loss.RowOwner(0);

  // GPU objects
  auto&& stream = hydrogen::cuda::GetDefaultStream();
  auto&& event = hydrogen::cuda::GetDefaultEvent();
  El::SyncInfo<El::Device::GPU> syncInfo{stream, event};
  cuda::thrust::allocator<> alloc(stream);

  // Get label indices.  Entries start at `height` (an invalid row
  // index), so a MIN all-reduce keeps the index written by whichever
  // rank owns the one-hot entry of each column.
  cuda::thrust::vector<El::Int> label_indices(local_width, height);
  {
    const auto& local_size = local_height * local_width;
    const auto& block_dim = 256;
    const auto& grid_dim = (local_size + block_dim - 1) / block_dim;
    hipLaunchKernelGGL(( one_hot_matrix_to_indices), dim3(grid_dim), dim3(block_dim), 0, stream,
      local_height, local_width,
      labels.ColShift(), labels.ColStride(),
      local_labels.LockedBuffer(), local_labels.LDim(),
      label_indices.data().get());
    /// @todo The LBANN Aluminum interface doesn't gracefully handle
    /// GPU data that is not TensorDataType.
    El::mpi::AllReduce(label_indices.data().get(),
                       label_indices.size(),
                       El::mpi::MIN,
                       col_comm, syncInfo);
  }

  // Find top-k entries in each column of local prediction matrix
  cuda::thrust::vector<entry<TensorDataType>> top_entries(local_width * k);
  {
    // Fix: qualify with std:: -- hipify emitted a bare "::max", which
    // does not resolve to the standard algorithm (the CUDA original
    // uses std::max here).
    const auto& num_local_entries_per_col = std::max(local_height, k);
    const auto& num_local_entries = local_width * num_local_entries_per_col;
    const auto& block_dim = 256;
    const auto& grid_dim = (num_local_entries + block_dim - 1) / block_dim;
    cuda::thrust::vector<entry<TensorDataType>> local_entries(num_local_entries);
    cuda::thrust::vector<El::Int> local_entries_cols(num_local_entries);
    hipLaunchKernelGGL(( dense_matrix_to_sparse_vectors), dim3(grid_dim), dim3(block_dim), 0, stream,
      num_local_entries_per_col, local_height, local_width, height,
      predictions.ColShift(), predictions.ColStride(),
      local_predictions.LockedBuffer(), local_predictions.LDim(),
      local_entries.data().get(), num_local_entries_per_col);
    hipLaunchKernelGGL(( fill_with_tensor_index), dim3(grid_dim), dim3(block_dim), 0, stream,
      num_local_entries, local_width, num_local_entries_per_col,
      local_entries_cols.data().get());
    // Sort by value, then stable-sort on the column key so each
    // column's entries end up contiguous and value-ordered.
    ::thrust::sort_by_key(alloc.system(),
                          local_entries.begin(),
                          local_entries.end(),
                          local_entries_cols.begin(),
                          entry_compare<TensorDataType>());
    ::thrust::stable_sort_by_key(alloc.system(),
                                 local_entries_cols.begin(),
                                 local_entries_cols.end(),
                                 local_entries.begin());
    // Strided 2D copy extracts the first k (largest) entries per column.
    CHECK_CUDA(hipMemcpy2DAsync(top_entries.data().get(),
                                k * sizeof(entry<TensorDataType>),
                                local_entries.data().get(),
                                num_local_entries_per_col * sizeof(entry<TensorDataType>),
                                k * sizeof(entry<TensorDataType>),
                                local_width,
                                hipMemcpyDeviceToDevice,
                                stream));
  }

  // Find top-k entries in each column of global prediction matrix
  if (col_comm_size > 1) {
    const auto& num_entries_per_rank = local_width * k;
    const auto& num_entries = col_comm_size * num_entries_per_rank;
    const auto& block_dim = 256;
    const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
    if (col_comm_rank != col_comm_root) {
      comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
                  top_entries.size() * sizeof(entry<TensorDataType>),
                  col_comm_root,
                  col_comm, syncInfo);
    } else {
      cuda::thrust::vector<entry<TensorDataType>> global_top_entries(num_entries);
      cuda::thrust::vector<El::Int> global_top_entries_cols(num_entries);
      comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
                  top_entries.size() * sizeof(entry<TensorDataType>),
                  reinterpret_cast<El::byte*>(global_top_entries.data().get()),
                  col_comm, syncInfo);
      hipLaunchKernelGGL(( fill_with_tensor_index), dim3(grid_dim), dim3(block_dim), 0, stream,
        num_entries, local_width, k, global_top_entries_cols.data().get());
      ::thrust::sort_by_key(alloc.system(),
                            global_top_entries.begin(),
                            global_top_entries.end(),
                            global_top_entries_cols.begin(),
                            entry_compare<TensorDataType>());
      ::thrust::stable_sort_by_key(alloc.system(),
                                   global_top_entries_cols.begin(),
                                   global_top_entries_cols.end(),
                                   global_top_entries.begin());
      CHECK_CUDA(hipMemcpy2DAsync(top_entries.data().get(),
                                  k * sizeof(entry<TensorDataType>),
                                  global_top_entries.data().get(),
                                  col_comm_size * k * sizeof(entry<TensorDataType>),
                                  k * sizeof(entry<TensorDataType>),
                                  local_width,
                                  hipMemcpyDeviceToDevice,
                                  stream));
    }
  }

  // Compute categorical accuracy (only the column root owns loss row 0)
  El::Zero(loss);
  if (col_comm_rank == col_comm_root) {
    const auto& num_entries = local_width * k;
    const auto& block_dim = 256;
    const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
    hipLaunchKernelGGL(( compute_categorical_accuracy), dim3(grid_dim), dim3(block_dim), 0, stream,
      k, local_width, height-1,
      top_entries.data().get(), k,
      label_indices.data().get(),
      local_loss.Buffer(), local_loss.LDim());
  }
}
} // namespace
/** Forward prop: delegate to the GPU implementation.
 *  Input 0 is the prediction matrix, input 1 the one-hot label matrix;
 *  the per-sample accuracy is written to this layer's activations. */
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void top_k_categorical_accuracy_layer<TensorDataType, T_layout, Dev>::fp_compute() {
  fp_gpu(*this->get_comm(),
         this->m_k,
         this->get_prev_activations(0),
         this->get_prev_activations(1),
         this->get_activations());
}
#define PROTO(T) \
template class top_k_categorical_accuracy_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class top_k_categorical_accuracy_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| 23dc9c8a82a0881a77e22d10410bba241fbef318.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_TOP_K_CATEGORICAL_ACCURACY_LAYER_INSTANTIATE
#include "lbann/layers/loss/top_k_categorical_accuracy.hpp"
#include "lbann/utils/cuda.hpp"
#include "lbann/utils/exception.hpp"
#include <thrust/sort.h>
#include <thrust/iterator/discard_iterator.h>
namespace lbann {
namespace {
/** Sparse vector entry.
 *  Pairs a matrix value with the global row it came from.  Padding
 *  entries (see dense_matrix_to_sparse_vectors) carry value = -inf and
 *  index = global matrix height, i.e. an out-of-range row. */
template <typename TensorDataType>
struct entry {
  /** Vector entry value. */
  TensorDataType value;
  /** Vector entry index (global row in the distributed matrix). */
  El::Int index;
};
/** Comparison operation to sort sparse vector entries.
 * Orders entries by decreasing value; ties on the value are broken by
 * increasing index, so earlier rows win.
 */
template <typename TensorDataType>
struct entry_compare : ::thrust::binary_function<entry<TensorDataType>,entry<TensorDataType>,bool> {
  __host__ __device__ bool operator()(const entry<TensorDataType>& a, const entry<TensorDataType>& b) const {
    if (a.value != b.value) {
      return a.value > b.value;
    }
    return a.index < b.index;
  }
};
/** Convert columns of a dense matrix into sparse vectors.
 * The matrix and vectors are both distributed, so entry indices in
 * the sparse vectors correspond to global row indices in the dense
 * matrix.
 *
 * Output rows past the local matrix height are padded with
 * value = -infinity and index = global_matrix_height, so padding sorts
 * after every real entry and is recognizably invalid.
 */
template <typename TensorDataType>
__global__ void dense_matrix_to_sparse_vectors(El::Int local_vector_size,
                                               El::Int local_matrix_height,
                                               El::Int local_matrix_width,
                                               El::Int global_matrix_height,
                                               El::Int global_matrix_col_shift,
                                               El::Int global_matrix_col_stride,
                                               const TensorDataType* __restrict__ local_matrix,
                                               El::Int local_matrix_ldim,
                                               entry<TensorDataType>* __restrict__ local_entries,
                                               El::Int local_entries_ldim) {
  const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
  const El::Int num_threads = blockDim.x * gridDim.x;
  const El::Int num_local_entries = local_vector_size * local_matrix_width;
  // Grid-stride loop over all output entries.
  for (El::Int i = gid; i < num_local_entries; i += num_threads) {
    const auto& local_row = i % local_vector_size;
    const auto& local_col = i / local_vector_size;
    auto& current_entry = local_entries[local_row + local_col * local_entries_ldim];
    if (local_row < local_matrix_height) {
      // Map the local row back to its global row index.
      const auto& global_row = (global_matrix_col_shift
                                + local_row * global_matrix_col_stride);
      current_entry.value = local_matrix[local_row + local_col * local_matrix_ldim];
      current_entry.index = global_row;
    } else {
      // Padding entry: -inf value, out-of-range index.
      current_entry.value = -cuda::infinity<TensorDataType>();
      current_entry.index = global_matrix_height;
    }
  }
}
/** Fill an array with a corresponding tensor index.
 * Consider a d(1) x d(2) x ... x d(n) tensor stored contiguously with
 * d(1) the most major dimension and d(n) the most minor.  For a chosen
 * dimension k, this kernel overwrites every entry of the buffer with
 * that entry's index i(k) along dimension k, where:
 *   tensor_size = d(1) * ... * d(n)
 *   dim         = d(k)
 *   dim_stride  = d(k+1) * ... * d(n)
 */
__global__ void fill_with_tensor_index(El::Int tensor_size,
                                       El::Int dim,
                                       El::Int dim_stride,
                                       El::Int* tensor) {
  const El::Int stride = blockDim.x * gridDim.x;
  // Grid-stride traversal of the flattened tensor.
  for (El::Int pos = threadIdx.x + blockIdx.x * blockDim.x;
       pos < tensor_size;
       pos += stride) {
    tensor[pos] = (pos / dim_stride) % dim;
  }
}
/** Get indices corresponding to one-hot matrix.
 * Each column of the input matrix is interpreted as a one-hot
 * vector. Note that we may get race conditions if a matrix column is
 * not a one-hot vector.
 */
template <typename TensorDataType>
__global__ void one_hot_matrix_to_indices(El::Int local_height,
                                          El::Int local_width,
                                          El::Int global_matrix_col_shift,
                                          El::Int global_matrix_col_stride,
                                          const TensorDataType* __restrict__ local_matrix,
                                          El::Int local_matrix_ldim,
                                          El::Int* __restrict__ indices) {
  const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
  const El::Int num_threads = blockDim.x * gridDim.x;
  const El::Int local_size = local_height * local_width;
  // Grid-stride loop over all local matrix entries.
  for (El::Int i = gid; i < local_size; i += num_threads) {
    const auto& local_row = i % local_height;
    const auto& local_col = i / local_height;
    // Any strictly positive entry is treated as the column's hot entry.
    if (local_matrix[local_row + local_col * local_matrix_ldim] > TensorDataType(0.0)) {
      // Record the corresponding global row index for this column.
      const auto& global_row = (global_matrix_col_shift
                                + local_row * global_matrix_col_stride);
      indices[local_col] = global_row;
    }
  }
}
/** Compute categorical accuracy for each matrix column.
 * A column's loss becomes one when its label index appears among the
 * column's top-k entries and is a valid row index; otherwise the
 * previously-zeroed loss value is left untouched.
 */
template <typename TensorDataType>
__global__ void compute_categorical_accuracy(El::Int k,
                                             El::Int width,
                                             El::Int max_entry,
                                             const entry<TensorDataType>* __restrict__ top_entries,
                                             El::Int top_entries_ldim,
                                             const El::Int* __restrict__ label_indices,
                                             TensorDataType* __restrict__ loss,
                                             El::Int loss_stride) {
  const El::Int first = threadIdx.x + blockIdx.x * blockDim.x;
  const El::Int stride = blockDim.x * gridDim.x;
  const El::Int total = width * k;
  // One unit of work per (top-k slot, column) pair.
  for (El::Int pos = first; pos < total; pos += stride) {
    const El::Int slot = pos % k;
    const El::Int col = pos / k;
    const El::Int label = label_indices[col];
    if (label > max_entry) {
      continue;  // label index never resolved to a valid row
    }
    if (top_entries[slot + col * top_entries_ldim].index == label) {
      loss[col * loss_stride] = TensorDataType(1.0);
    }
  }
}
/** GPU implementation of top-k categorical accuracy layer forward prop.
 *
 *  For each mini-batch column, writes loss = 1 if the sample's label
 *  index is among the k largest prediction entries and loss = 0
 *  otherwise.  Each rank extracts its local top-k candidates, the
 *  column root gathers them and re-selects the global top k, then
 *  scores the labels.
 */
template <typename TensorDataType>
void fp_gpu(lbann_comm& comm,
            El::Int k,
            const El::AbstractDistMatrix<TensorDataType>& predictions,
            const El::AbstractDistMatrix<TensorDataType>& labels,
            El::AbstractDistMatrix<TensorDataType>& loss) {
  // Local matrices
  const auto& local_predictions = predictions.LockedMatrix();
  const auto& local_labels = labels.LockedMatrix();
  auto& local_loss = loss.Matrix();
  const auto& height = predictions.Height();
  const auto& local_height = local_predictions.Height();
  const auto& local_width = local_predictions.Width();
  // Trivial cases: k < 1 matches nothing, k >= height matches
  // everything, and ranks without local columns have no work.
  if (k < 1) {
    El::Zero(loss);
    return;
  } else if (k >= height) {
    El::Fill(loss, El::TypeTraits<TensorDataType>::One());
    return;
  } else if (local_width < 1) {
    return;
  }
  // Column communicator
  auto&& col_comm = predictions.ColComm();
  const auto& col_comm_rank = El::mpi::Rank(col_comm);
  const auto& col_comm_size = El::mpi::Size(col_comm);
  const auto& col_comm_root = loss.RowOwner(0);
  // GPU objects
  auto&& stream = hydrogen::cuda::GetDefaultStream();
  auto&& event = hydrogen::cuda::GetDefaultEvent();
  El::SyncInfo<El::Device::GPU> syncInfo{stream, event};
  cuda::thrust::allocator<> alloc(stream);
  // Get label indices.  Entries start at `height` (an invalid row
  // index), so the MIN all-reduce keeps the index written by whichever
  // rank owns the one-hot entry of each column.
  cuda::thrust::vector<El::Int> label_indices(local_width, height);
  {
    const auto& local_size = local_height * local_width;
    const auto& block_dim = 256;
    const auto& grid_dim = (local_size + block_dim - 1) / block_dim;
    one_hot_matrix_to_indices<<<grid_dim, block_dim, 0, stream>>>(
      local_height, local_width,
      labels.ColShift(), labels.ColStride(),
      local_labels.LockedBuffer(), local_labels.LDim(),
      label_indices.data().get());
    /// @todo The LBANN Aluminum interface doesn't gracefully handle
    /// GPU data that is not TensorDataType.
    El::mpi::AllReduce(label_indices.data().get(),
                       label_indices.size(),
                       El::mpi::MIN,
                       col_comm, syncInfo);
  }
  // Find top-k entries in each column of local prediction matrix
  cuda::thrust::vector<entry<TensorDataType>> top_entries(local_width * k);
  {
    const auto& num_local_entries_per_col = std::max(local_height, k);
    const auto& num_local_entries = local_width * num_local_entries_per_col;
    const auto& block_dim = 256;
    const auto& grid_dim = (num_local_entries + block_dim - 1) / block_dim;
    cuda::thrust::vector<entry<TensorDataType>> local_entries(num_local_entries);
    cuda::thrust::vector<El::Int> local_entries_cols(num_local_entries);
    dense_matrix_to_sparse_vectors<<<grid_dim, block_dim, 0, stream>>>(
      num_local_entries_per_col, local_height, local_width, height,
      predictions.ColShift(), predictions.ColStride(),
      local_predictions.LockedBuffer(), local_predictions.LDim(),
      local_entries.data().get(), num_local_entries_per_col);
    fill_with_tensor_index<<<grid_dim, block_dim, 0, stream>>>(
      num_local_entries, local_width, num_local_entries_per_col,
      local_entries_cols.data().get());
    // Sort by value, then stable-sort on the column key so each
    // column's entries end up contiguous and value-ordered.
    ::thrust::sort_by_key(alloc.system(),
                          local_entries.begin(),
                          local_entries.end(),
                          local_entries_cols.begin(),
                          entry_compare<TensorDataType>());
    ::thrust::stable_sort_by_key(alloc.system(),
                                 local_entries_cols.begin(),
                                 local_entries_cols.end(),
                                 local_entries.begin());
    // Strided 2D copy extracts the first k (largest) entries per column.
    CHECK_CUDA(cudaMemcpy2DAsync(top_entries.data().get(),
                                 k * sizeof(entry<TensorDataType>),
                                 local_entries.data().get(),
                                 num_local_entries_per_col * sizeof(entry<TensorDataType>),
                                 k * sizeof(entry<TensorDataType>),
                                 local_width,
                                 cudaMemcpyDeviceToDevice,
                                 stream));
  }
  // Find top-k entries in each column of global prediction matrix
  if (col_comm_size > 1) {
    const auto& num_entries_per_rank = local_width * k;
    const auto& num_entries = col_comm_size * num_entries_per_rank;
    const auto& block_dim = 256;
    const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
    if (col_comm_rank != col_comm_root) {
      // Non-root ranks just send their candidates to the root.
      comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
                  top_entries.size() * sizeof(entry<TensorDataType>),
                  col_comm_root,
                  col_comm, syncInfo);
    } else {
      // Root gathers all candidates and repeats the per-column top-k
      // selection over the combined set.
      cuda::thrust::vector<entry<TensorDataType>> global_top_entries(num_entries);
      cuda::thrust::vector<El::Int> global_top_entries_cols(num_entries);
      comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
                  top_entries.size() * sizeof(entry<TensorDataType>),
                  reinterpret_cast<El::byte*>(global_top_entries.data().get()),
                  col_comm, syncInfo);
      fill_with_tensor_index<<<grid_dim, block_dim, 0, stream>>>(
        num_entries, local_width, k, global_top_entries_cols.data().get());
      ::thrust::sort_by_key(alloc.system(),
                            global_top_entries.begin(),
                            global_top_entries.end(),
                            global_top_entries_cols.begin(),
                            entry_compare<TensorDataType>());
      ::thrust::stable_sort_by_key(alloc.system(),
                                   global_top_entries_cols.begin(),
                                   global_top_entries_cols.end(),
                                   global_top_entries.begin());
      CHECK_CUDA(cudaMemcpy2DAsync(top_entries.data().get(),
                                   k * sizeof(entry<TensorDataType>),
                                   global_top_entries.data().get(),
                                   col_comm_size * k * sizeof(entry<TensorDataType>),
                                   k * sizeof(entry<TensorDataType>),
                                   local_width,
                                   cudaMemcpyDeviceToDevice,
                                   stream));
    }
  }
  // Compute categorical accuracy (only the column root owns loss row 0)
  El::Zero(loss);
  if (col_comm_rank == col_comm_root) {
    const auto& num_entries = local_width * k;
    const auto& block_dim = 256;
    const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
    compute_categorical_accuracy<<<grid_dim, block_dim, 0, stream>>>(
      k, local_width, height-1,
      top_entries.data().get(), k,
      label_indices.data().get(),
      local_loss.Buffer(), local_loss.LDim());
  }
}
} // namespace
/** Forward prop: delegate to the GPU implementation.
 *  Input 0 is the prediction matrix, input 1 the one-hot label matrix;
 *  the per-sample accuracy is written to this layer's activations. */
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void top_k_categorical_accuracy_layer<TensorDataType, T_layout, Dev>::fp_compute() {
  fp_gpu(*this->get_comm(),
         this->m_k,
         this->get_prev_activations(0),
         this->get_prev_activations(1),
         this->get_activations());
}
#define PROTO(T) \
template class top_k_categorical_accuracy_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class top_k_categorical_accuracy_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
cfce02336b34fffeb44ae38d41f7946e32c6c640.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduction.h"
#include <cfloat>
namespace cg = cooperative_groups;
/* Larger of two doubles (returns y when neither compares greater). */
__device__ double getMax(double x, double y) {
	if (x > y) {
		return x;
	}
	return y;
}
/* Sum of two doubles. */
__device__ double getSum(double x, double y) {
	const double total = x + y;
	return total;
}
/** Max-reduction, interleaved addressing: each block reduces blockDim.x
 *  input elements to one partial maximum in g_odata[blockIdx.x].
 *  Out-of-range lanes are seeded with -DBL_MAX, the identity for max;
 *  the previous padding of 0 corrupted results for all-negative input. */
__global__ void reduceKernelMax2(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

	sdata[tid] = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	cg::sync(cta);

	// Shared-memory tree reduction; active span halves each step.
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = getMax(sdata[tid], sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/** Max-reduction where each thread first folds two input elements
 *  (block covers 2*blockDim.x inputs), then a shared-memory tree
 *  reduction produces the block maximum.  Out-of-range lanes start at
 *  -DBL_MAX, the identity for max (0 was wrong for negative input). */
__global__ void reduceKernelMax3(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	if (i + blockDim.x < n)
		result = getMax(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = result = getMax(result, sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Max-reduction, templated block size: each thread folds two inputs,
 *  a shared-memory tree reduces to 64 values, and the first warp
 *  finishes with cooperative-groups shuffles.  Out-of-range lanes start
 *  at -DBL_MAX, the identity for max (0 was wrong for negative input). */
template <unsigned int blockSize>
__global__ void reduceKernelMax4(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	if (i + blockDim.x < n)
		result = getMax(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	// Tree reduction down to 64 shared-memory slots.
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = result = getMax(result, sdata[tid + s]);
		}
		cg::sync(cta);
	}

	// First warp reduces the final 64 values via shuffles.
	cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
	if (cta.thread_rank() < 32)
	{
		if (blockSize >= 64) result = getMax(result, sdata[tid + 32]);
		for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
		{
			result = getMax(result, tile32.shfl_down(result, offset));
		}
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Max-reduction with fully unrolled shared-memory stages (guarded by
 *  the compile-time blockSize) followed by warp shuffles.  Out-of-range
 *  lanes start at -DBL_MAX, the identity for max (0 was wrong for
 *  all-negative input). */
template <unsigned int blockSize>
__global__ void reduceKernelMax5(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	if (i + blockDim.x < n)
		result = getMax(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	// Unrolled tree reduction; stages compile away for small blockSize.
	if ((blockSize >= 512) && (tid < 256))
	{
		sdata[tid] = result = getMax(result, sdata[tid + 256]);
	}
	cg::sync(cta);

	if ((blockSize >= 256) && (tid < 128))
	{
		sdata[tid] = result = getMax(result, sdata[tid + 128]);
	}
	cg::sync(cta);

	if ((blockSize >= 128) && (tid < 64))
	{
		sdata[tid] = result = getMax(result, sdata[tid + 64]);
	}
	cg::sync(cta);

	// First warp reduces the final 64 values via shuffles.
	cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
	if (cta.thread_rank() < 32)
	{
		if (blockSize >= 64) result = getMax(result, sdata[tid + 32]);
		for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
		{
			result = getMax(result, tile32.shfl_down(result, offset));
		}
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Sum-reduction, interleaved addressing: each block reduces blockDim.x
 *  input elements to one partial sum in g_odata[blockIdx.x].
 *  Out-of-range lanes contribute 0, the identity for addition. */
__global__ void reduceKernelSum2(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

	sdata[tid] = (i < n) ? g_idata[i] : 0;
	cg::sync(cta);

	// Shared-memory tree reduction; active span halves each step.
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = getSum(sdata[tid], sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/** Sum-reduction where each thread first folds two input elements
 *  (block covers 2*blockDim.x inputs) before the shared-memory tree. */
__global__ void reduceKernelSum3(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : 0;
	if (i + blockDim.x < n)
		result = getSum(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = result = getSum(result, sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Sum-reduction, templated block size: each thread folds two inputs,
 *  a shared-memory tree reduces to 64 values, and the first warp
 *  finishes with cooperative-groups shuffles. */
template <unsigned int blockSize>
__global__ void reduceKernelSum4(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : 0;
	if (i + blockDim.x < n)
		result = getSum(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	// Tree reduction down to 64 shared-memory slots.
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = result = getSum(result, sdata[tid + s]);
		}
		cg::sync(cta);
	}

	// First warp reduces the final 64 values via shuffles.
	cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
	if (cta.thread_rank() < 32)
	{
		if (blockSize >= 64) result = getSum(result, sdata[tid + 32]);
		for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
		{
			result = getSum(result, tile32.shfl_down(result, offset));
		}
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Sum-reduction with fully unrolled shared-memory stages (guarded by
 *  the compile-time blockSize) followed by warp shuffles. */
template <unsigned int blockSize>
__global__ void reduceKernelSum5(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : 0;
	if (i + blockDim.x < n)
		result = getSum(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	// Unrolled tree reduction; stages compile away for small blockSize.
	if ((blockSize >= 512) && (tid < 256))
	{
		sdata[tid] = result = getSum(result, sdata[tid + 256]);
	}
	cg::sync(cta);

	if ((blockSize >= 256) && (tid < 128))
	{
		sdata[tid] = result = getSum(result, sdata[tid + 128]);
	}
	cg::sync(cta);

	if ((blockSize >= 128) && (tid < 64))
	{
		sdata[tid] = result = getSum(result, sdata[tid + 64]);
	}
	cg::sync(cta);

	// First warp reduces the final 64 values via shuffles.
	cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
	if (cta.thread_rank() < 32)
	{
		if (blockSize >= 64) result = getSum(result, sdata[tid + 32]);
		for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
		{
			result = getSum(result, tile32.shfl_down(result, offset));
		}
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Host-side dispatcher for the reduction kernels.
 *
 *  @param wichKernel  Kernel variant (2-5); other values call throw;
 *                     with no active exception, i.e. std::terminate.
 *  @param type        MAXIMUM or SUMMATION; other values are a no-op.
 *  @param size        Number of input elements.
 *  @param threads     Threads per block; for variants 4/5 it must be a
 *                     power of two in [1, 512], otherwise nothing is
 *                     launched (preserved behavior).
 *  @param blocks      Number of blocks (one partial result each).
 *  @param d_idata     Device input array of `size` doubles.
 *  @param d_odata     Device output array of `blocks` partial results.
 */
void reduce(int wichKernel, int type, int size, int threads, int blocks, double *d_idata, double *d_odata)
{
	dim3 dimBlock(threads, 1, 1);
	dim3 dimGrid(blocks, 1, 1);
	int smemSize = threads * sizeof(double);

	// Instantiate a block-size-templated kernel for the runtime thread
	// count.  Thread counts outside the listed powers of two launch
	// nothing, exactly like the original per-kernel switches.
#define REDUCE_DISPATCH_BLOCKSIZE(KERNEL)                                       \
	switch (threads)                                                        \
	{                                                                       \
	case 512: KERNEL<512><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; \
	case 256: KERNEL<256><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; \
	case 128: KERNEL<128><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; \
	case 64:  KERNEL<64><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;  \
	case 32:  KERNEL<32><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;  \
	case 16:  KERNEL<16><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;  \
	case 8:   KERNEL<8><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;   \
	case 4:   KERNEL<4><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;   \
	case 2:   KERNEL<2><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;   \
	case 1:   KERNEL<1><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;   \
	}

	switch (type)
	{
	case MAXIMUM:
		switch (wichKernel)
		{
		case 2:
			reduceKernelMax2<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
			break;
		case 3:
			reduceKernelMax3<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
			break;
		case 4:
			REDUCE_DISPATCH_BLOCKSIZE(reduceKernelMax4)
			break;
		case 5:
			REDUCE_DISPATCH_BLOCKSIZE(reduceKernelMax5)
			break;
		default:
			throw;  // no active exception -> terminate (as before)
		}
		break;
	case SUMMATION:
		switch (wichKernel)
		{
		case 2:
			reduceKernelSum2<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
			break;
		case 3:
			reduceKernelSum3<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
			break;
		case 4:
			REDUCE_DISPATCH_BLOCKSIZE(reduceKernelSum4)
			break;
		case 5:
			REDUCE_DISPATCH_BLOCKSIZE(reduceKernelSum5)
			break;
		default:
			throw;  // no active exception -> terminate (as before)
		}
		break;
	default:
		// Unknown reduction type: silently do nothing (as before).
		break;
	}
#undef REDUCE_DISPATCH_BLOCKSIZE
}
| cfce02336b34fffeb44ae38d41f7946e32c6c640.cu | #include "reduction.h"
#include <cfloat>
namespace cg = cooperative_groups;
/* Larger of two doubles (y < x is equivalent to the original x > y). */
__device__ double getMax(double x, double y) {
	return (y < x) ? x : y;
}
/* Sum of two doubles. */
__device__ double getSum(double x, double y) {
	const double total = x + y;
	return total;
}
/** Max-reduction, interleaved addressing: each block reduces blockDim.x
 *  input elements to one partial maximum in g_odata[blockIdx.x].
 *  Out-of-range lanes are seeded with -DBL_MAX, the identity for max;
 *  the previous padding of 0 corrupted results for all-negative input. */
__global__ void reduceKernelMax2(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

	sdata[tid] = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	cg::sync(cta);

	// Shared-memory tree reduction; active span halves each step.
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = getMax(sdata[tid], sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/** Max-reduction where each thread first folds two input elements
 *  (block covers 2*blockDim.x inputs), then a shared-memory tree
 *  reduction produces the block maximum.  Out-of-range lanes start at
 *  -DBL_MAX, the identity for max (0 was wrong for negative input). */
__global__ void reduceKernelMax3(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	if (i + blockDim.x < n)
		result = getMax(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = result = getMax(result, sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Max-reduction, templated block size: each thread folds two inputs,
 *  a shared-memory tree reduces to 64 values, and the first warp
 *  finishes with cooperative-groups shuffles.  Out-of-range lanes start
 *  at -DBL_MAX, the identity for max (0 was wrong for negative input). */
template <unsigned int blockSize>
__global__ void reduceKernelMax4(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	if (i + blockDim.x < n)
		result = getMax(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	// Tree reduction down to 64 shared-memory slots.
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = result = getMax(result, sdata[tid + s]);
		}
		cg::sync(cta);
	}

	// First warp reduces the final 64 values via shuffles.
	cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
	if (cta.thread_rank() < 32)
	{
		if (blockSize >= 64) result = getMax(result, sdata[tid + 32]);
		for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
		{
			result = getMax(result, tile32.shfl_down(result, offset));
		}
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Max-reduction with fully unrolled shared-memory stages (guarded by
 *  the compile-time blockSize) followed by warp shuffles.  Out-of-range
 *  lanes start at -DBL_MAX, the identity for max (0 was wrong for
 *  all-negative input). */
template <unsigned int blockSize>
__global__ void reduceKernelMax5(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : -DBL_MAX;  // identity for max
	if (i + blockDim.x < n)
		result = getMax(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	// Unrolled tree reduction; stages compile away for small blockSize.
	if ((blockSize >= 512) && (tid < 256))
	{
		sdata[tid] = result = getMax(result, sdata[tid + 256]);
	}
	cg::sync(cta);

	if ((blockSize >= 256) && (tid < 128))
	{
		sdata[tid] = result = getMax(result, sdata[tid + 128]);
	}
	cg::sync(cta);

	if ((blockSize >= 128) && (tid < 64))
	{
		sdata[tid] = result = getMax(result, sdata[tid + 64]);
	}
	cg::sync(cta);

	// First warp reduces the final 64 values via shuffles.
	cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
	if (cta.thread_rank() < 32)
	{
		if (blockSize >= 64) result = getMax(result, sdata[tid + 32]);
		for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
		{
			result = getMax(result, tile32.shfl_down(result, offset));
		}
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
/** Sum-reduction, interleaved addressing: each block reduces blockDim.x
 *  input elements to one partial sum in g_odata[blockIdx.x].
 *  Out-of-range lanes contribute 0, the identity for addition. */
__global__ void reduceKernelSum2(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

	sdata[tid] = (i < n) ? g_idata[i] : 0;
	cg::sync(cta);

	// Shared-memory tree reduction; active span halves each step.
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = getSum(sdata[tid], sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/** Sum-reduction where each thread first folds two input elements
 *  (block covers 2*blockDim.x inputs) before the shared-memory tree. */
__global__ void reduceKernelSum3(double *g_idata, double *g_odata, unsigned int n)
{
	cg::thread_block cta = cg::this_thread_block();
	extern __shared__ double sdata[];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;

	double result = (i < n) ? g_idata[i] : 0;
	if (i + blockDim.x < n)
		result = getSum(result, g_idata[i + blockDim.x]);
	sdata[tid] = result;
	cg::sync(cta);

	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] = result = getSum(result, sdata[tid + s]);
		}
		cg::sync(cta);
	}

	if (tid == 0) g_odata[blockIdx.x] = result;
}
// Sum-reduction kernel, version 4: shared-memory tree down to warp width,
// then warp shuffles finish the last 64 partials without barriers.
// Each block consumes 2*blockDim.x elements (first add during load) and
// writes one partial sum to g_odata[blockIdx.x].
// Preconditions: blockSize == blockDim.x (power of two) and
// blockDim.x * sizeof(double) bytes of dynamic shared memory.
template <unsigned int blockSize>
__global__ void reduceKernelSum4(double *g_idata, double *g_odata, unsigned int n)
{
    cg::thread_block cta = cg::this_thread_block();
    extern __shared__ double sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    // 0 is the additive identity for threads past the end of the input.
    double result = (i < n) ? g_idata[i] : 0;
    if (i + blockDim.x < n)
        result = getSum(result, g_idata[i + blockDim.x]);
    sdata[tid] = result;
    cg::sync(cta);
    // Tree reduction in shared memory, stopping once a single warp remains.
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] = result = getSum(result, sdata[tid + s]);
        }
        cg::sync(cta);
    }
    cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
    if (cta.thread_rank() < 32)
    {
        // Fold the second warp's partials in, then reduce within the warp
        // with shuffle-down steps (no shared memory, no barriers).
        if (blockSize >= 64) result = getSum(result, sdata[tid + 32]);
        for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
        {
            result = getSum(result, tile32.shfl_down(result, offset));
        }
    }
    if (tid == 0) g_odata[blockIdx.x] = result;
}
// Sum-reduction kernel, version 5: completely unrolled shared-memory tree
// followed by a warp-shuffle finish. Each block consumes 2*blockDim.x
// elements (first add during load) and writes one partial sum to
// g_odata[blockIdx.x].
// Preconditions: blockSize == blockDim.x (power of two, <= 512) and
// blockDim.x * sizeof(double) bytes of dynamic shared memory.
template <unsigned int blockSize>
__global__ void reduceKernelSum5(double *g_idata, double *g_odata, unsigned int n)
{
    cg::thread_block cta = cg::this_thread_block();
    extern __shared__ double sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    // 0 is the additive identity for threads past the end of the input.
    double result = (i < n) ? g_idata[i] : 0;
    if (i + blockDim.x < n)
        result = getSum(result, g_idata[i + blockDim.x]);
    sdata[tid] = result;
    cg::sync(cta);
    // Fully unrolled tree: stages that blockSize makes statically dead are
    // compiled away because blockSize is a template constant.
    if ((blockSize >= 512) && (tid < 256))
    {
        sdata[tid] = result = getSum(result, sdata[tid + 256]);
    }
    cg::sync(cta);
    if ((blockSize >= 256) && (tid < 128))
    {
        sdata[tid] = result = getSum(result, sdata[tid + 128]);
    }
    cg::sync(cta);
    if ((blockSize >= 128) && (tid < 64))
    {
        sdata[tid] = result = getSum(result, sdata[tid + 64]);
    }
    cg::sync(cta);
    cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
    if (cta.thread_rank() < 32)
    {
        // Fold the second warp's partials in, then finish with shuffle-down
        // steps inside the single remaining warp.
        if (blockSize >= 64) result = getSum(result, sdata[tid + 32]);
        for (int offset = tile32.size() / 2; offset > 0; offset /= 2)
        {
            result = getSum(result, tile32.shfl_down(result, offset));
        }
    }
    if (tid == 0) g_odata[blockIdx.x] = result;
}
// Dispatch one reduction pass on the GPU.
//
//   wichKernel : kernel variant to launch (2..5; anything else -> throw)
//   type       : MAXIMUM or SUMMATION (anything else is silently ignored)
//   size       : number of input elements
//   threads    : threads per block (power of two in {1..512} for 4/5)
//   blocks     : grid size; one partial result is written per block
//   d_idata    : device input array
//   d_odata    : device output array (one partial per block)
//
// Variants 4 and 5 are templated on the block size, so the runtime thread
// count must be mapped onto a compile-time template argument. That mapping
// is expanded from a single macro instead of being copy-pasted four times.
void reduce(int wichKernel, int type, int size, int threads, int blocks, double *d_idata, double *d_odata)
{
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    int smemSize = threads * sizeof(double);

// Instantiate the templated kernel that matches the runtime block size.
#define LAUNCH_TEMPLATED_REDUCTION(KERNEL)                                                  \
    switch (threads)                                                                        \
    {                                                                                       \
    case 512: KERNEL<512><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;  \
    case 256: KERNEL<256><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;  \
    case 128: KERNEL<128><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break;  \
    case 64:  KERNEL<64><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);  break;  \
    case 32:  KERNEL<32><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);  break;  \
    case 16:  KERNEL<16><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);  break;  \
    case 8:   KERNEL<8><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);   break;  \
    case 4:   KERNEL<4><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);   break;  \
    case 2:   KERNEL<2><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);   break;  \
    case 1:   KERNEL<1><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);   break;  \
    }

    switch (type)
    {
    case MAXIMUM:
        switch (wichKernel)
        {
        case 2:
            reduceKernelMax2<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
            break;
        case 3:
            reduceKernelMax3<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
            break;
        case 4:
            LAUNCH_TEMPLATED_REDUCTION(reduceKernelMax4);
            break;
        case 5:
            LAUNCH_TEMPLATED_REDUCTION(reduceKernelMax5);
            break;
        default:
            // No such kernel variant: bare throw terminates, as before.
            throw;
        }
        break;
    case SUMMATION:
        switch (wichKernel)
        {
        case 2:
            reduceKernelSum2<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
            break;
        case 3:
            reduceKernelSum3<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
            break;
        case 4:
            LAUNCH_TEMPLATED_REDUCTION(reduceKernelSum4);
            break;
        case 5:
            LAUNCH_TEMPLATED_REDUCTION(reduceKernelSum5);
            break;
        default:
            // No such kernel variant: bare throw terminates, as before.
            throw;
        }
        break;
    default:
        // Unknown reduction type: do nothing (original behavior).
        break;
    }
#undef LAUNCH_TEMPLATED_REDUCTION
}
|
a0fa42266ba4cc178deaf0fa5c35926ba91a01b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/optimizers/lars_momentum_op.h"
namespace paddle {
namespace operators {
// LARS momentum update, applied element-wise over the parameter tensor:
//   local_lr = lr * lars_coeff * ||p|| / (||g|| + lars_weight_decay * ||p||)
//              (only when both norms are positive; otherwise local_lr = lr)
//   v_out[i] = mu * v[i] + local_lr * (g[i] + lars_weight_decay * p[i])
//   p_out[i] = p[i] - v_out[i]
// p_norm / g_norm point at single-element device scalars holding the L2
// norms of the parameter and gradient tensors, computed by the host side.
template <typename T>
__global__ void MomentumLarsKernel(const T* p, const T* g, const T* v,
                                   const T* learning_rate, const T mu,
                                   const int64_t num, const T lars_coeff,
                                   const T lars_weight_decay, const T* p_norm,
                                   const T* g_norm, T* p_out, T* v_out) {
  T lr = learning_rate[0];
  T local_lr = learning_rate[0];
  // CUDA_KERNEL_LOOP iterates i over [0, num) across the grid
  // (Paddle's grid-stride loop macro).
  CUDA_KERNEL_LOOP(i, num) {
    if (p_norm[0] > 0 && g_norm[0] > 0) {
      local_lr = lr * lars_coeff * p_norm[0] /
                 (g_norm[0] + lars_weight_decay * p_norm[0]);
    }
    T v_new = v[i] * mu + local_lr * (g[i] + lars_weight_decay * p[i]);
    v_out[i] = v_new;
    p_out[i] = p[i] - v_new;
  }
}
// CUDA/HIP implementation of the LARS momentum optimizer op.
// Computes the L2 norms of Param and Grad on the device with Eigen, then
// launches MomentumLarsKernel to apply the layer-wise adaptive update.
template <typename DeviceContext, typename T>
class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Outputs (updated parameter and velocity) and inputs from the op.
    auto param_out = ctx.Output<framework::LoDTensor>("ParamOut");
    auto velocity_out = ctx.Output<framework::LoDTensor>("VelocityOut");
    auto param = ctx.Input<framework::LoDTensor>("Param");
    auto velocity = ctx.Input<framework::LoDTensor>("Velocity");
    auto grad = ctx.Input<framework::LoDTensor>("Grad");
    auto learning_rate = ctx.Input<framework::LoDTensor>("LearningRate");
    T* p_out = param_out->mutable_data<T>(ctx.GetPlace());
    T* v_out = velocity_out->mutable_data<T>(ctx.GetPlace());
    // Optimizer hyper-parameters (attributes are stored as float).
    T mu = static_cast<T>(ctx.Attr<float>("mu"));
    T lars_coeff = ctx.Attr<float>("lars_coeff");
    T lars_weight_decay = ctx.Attr<float>("lars_weight_decay");
    auto* p = param->data<T>();
    auto* v = velocity->data<T>();
    auto* g = grad->data<T>();
    auto* lr = learning_rate->data<T>();
    // 512 threads per block; ceil-divide to cover every element.
    int block = 512;
    int grid = (param->numel() + block - 1) / block;
    auto eigen_p = framework::EigenVector<T>::Flatten(*param);
    auto eigen_g = framework::EigenVector<T>::Flatten(*grad);
    // Calculate the norms using Eigen and launch the kernel.
    // p_norm_t / g_norm_t are single-element device tensors holding
    // ||Param||_2 and ||Grad||_2, read by the kernel via pointer.
    framework::Tensor p_norm_t, g_norm_t;
    p_norm_t.Resize({1});
    g_norm_t.Resize({1});
    auto* p_norm_data = p_norm_t.mutable_data<T>(ctx.GetPlace());
    auto* g_norm_data = g_norm_t.mutable_data<T>(ctx.GetPlace());
    auto ep_norm = framework::EigenScalar<T>::From(p_norm_t);
    auto eg_norm = framework::EigenScalar<T>::From(g_norm_t);
    auto* place = ctx.template device_context<DeviceContext>().eigen_device();
    ep_norm.device(*place) = eigen_p.square().sum().sqrt();
    eg_norm.device(*place) = eigen_g.square().sum().sqrt();
    // Launch on the context's stream so it is ordered after the norm work.
    hipLaunchKernelGGL(( MomentumLarsKernel), dim3(grid), dim3(block), 0, ctx.cuda_device_context().stream(),
        p, g, v, lr, mu, param->numel(), lars_coeff, lars_weight_decay,
        p_norm_data, g_norm_data, p_out, v_out);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
lars_momentum,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, double>);
| a0fa42266ba4cc178deaf0fa5c35926ba91a01b7.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/optimizers/lars_momentum_op.h"
namespace paddle {
namespace operators {
// LARS momentum update, applied element-wise over the parameter tensor:
//   local_lr = lr * lars_coeff * ||p|| / (||g|| + lars_weight_decay * ||p||)
//              (only when both norms are positive; otherwise local_lr = lr)
//   v_out[i] = mu * v[i] + local_lr * (g[i] + lars_weight_decay * p[i])
//   p_out[i] = p[i] - v_out[i]
// p_norm / g_norm point at single-element device scalars holding the L2
// norms of the parameter and gradient tensors, computed by the host side.
template <typename T>
__global__ void MomentumLarsKernel(const T* p, const T* g, const T* v,
                                   const T* learning_rate, const T mu,
                                   const int64_t num, const T lars_coeff,
                                   const T lars_weight_decay, const T* p_norm,
                                   const T* g_norm, T* p_out, T* v_out) {
  T lr = learning_rate[0];
  T local_lr = learning_rate[0];
  // CUDA_KERNEL_LOOP iterates i over [0, num) across the grid
  // (Paddle's grid-stride loop macro).
  CUDA_KERNEL_LOOP(i, num) {
    if (p_norm[0] > 0 && g_norm[0] > 0) {
      local_lr = lr * lars_coeff * p_norm[0] /
                 (g_norm[0] + lars_weight_decay * p_norm[0]);
    }
    T v_new = v[i] * mu + local_lr * (g[i] + lars_weight_decay * p[i]);
    v_out[i] = v_new;
    p_out[i] = p[i] - v_new;
  }
}
// CUDA implementation of the LARS momentum optimizer op.
// Computes the L2 norms of Param and Grad on the device with Eigen, then
// launches MomentumLarsKernel to apply the layer-wise adaptive update.
template <typename DeviceContext, typename T>
class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Outputs (updated parameter and velocity) and inputs from the op.
    auto param_out = ctx.Output<framework::LoDTensor>("ParamOut");
    auto velocity_out = ctx.Output<framework::LoDTensor>("VelocityOut");
    auto param = ctx.Input<framework::LoDTensor>("Param");
    auto velocity = ctx.Input<framework::LoDTensor>("Velocity");
    auto grad = ctx.Input<framework::LoDTensor>("Grad");
    auto learning_rate = ctx.Input<framework::LoDTensor>("LearningRate");
    T* p_out = param_out->mutable_data<T>(ctx.GetPlace());
    T* v_out = velocity_out->mutable_data<T>(ctx.GetPlace());
    // Optimizer hyper-parameters (attributes are stored as float).
    T mu = static_cast<T>(ctx.Attr<float>("mu"));
    T lars_coeff = ctx.Attr<float>("lars_coeff");
    T lars_weight_decay = ctx.Attr<float>("lars_weight_decay");
    auto* p = param->data<T>();
    auto* v = velocity->data<T>();
    auto* g = grad->data<T>();
    auto* lr = learning_rate->data<T>();
    // 512 threads per block; ceil-divide to cover every element.
    int block = 512;
    int grid = (param->numel() + block - 1) / block;
    auto eigen_p = framework::EigenVector<T>::Flatten(*param);
    auto eigen_g = framework::EigenVector<T>::Flatten(*grad);
    // Calculate the norms using Eigen and launch the kernel.
    // p_norm_t / g_norm_t are single-element device tensors holding
    // ||Param||_2 and ||Grad||_2, read by the kernel via pointer.
    framework::Tensor p_norm_t, g_norm_t;
    p_norm_t.Resize({1});
    g_norm_t.Resize({1});
    auto* p_norm_data = p_norm_t.mutable_data<T>(ctx.GetPlace());
    auto* g_norm_data = g_norm_t.mutable_data<T>(ctx.GetPlace());
    auto ep_norm = framework::EigenScalar<T>::From(p_norm_t);
    auto eg_norm = framework::EigenScalar<T>::From(g_norm_t);
    auto* place = ctx.template device_context<DeviceContext>().eigen_device();
    ep_norm.device(*place) = eigen_p.square().sum().sqrt();
    eg_norm.device(*place) = eigen_g.square().sum().sqrt();
    // Launch on the context's stream so it is ordered after the norm work.
    MomentumLarsKernel<<<grid, block, 0, ctx.cuda_device_context().stream()>>>(
        p, g, v, lr, mu, param->numel(), lars_coeff, lars_weight_decay,
        p_norm_data, g_norm_data, p_out, v_out);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
lars_momentum,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, double>);
|
c7a76e3a93de404eeb0f052fb2c58233524255b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector reduction example using shared memory.
* Works for small vectors that can be operated upon by a single thread block.
* Build as follows: make clean && make
* Execute as follows: ./vector_reduction
* Author: Naga Kandasamy
* Date modified: May 15, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#define NUM_ELEMENTS 1024
/* Include kernel */
#include "vector_reduction_kernel.hip"
void run_test(int);
extern "C" double compute_gold(float *, int);
double compute_on_device(float *, int);
void check_CUDA_error(const char *);
/* Entry point: validates the problem size (the single-block kernel supports
 * at most 1024 threads) and runs the CPU-vs-GPU reduction test. */
int main(int argc, char **argv)
{
    int num_elements = NUM_ELEMENTS;
    if (num_elements > 1024) {
        fprintf(stderr, "Input exceeds bounds\n");
        exit(EXIT_FAILURE);
    }
    run_test(num_elements);
    exit(EXIT_SUCCESS);
}
/* Generate a random input vector, reduce it on the CPU and on the GPU, and
 * compare the two results with a relative-error tolerance.
 *
 * Fix: compute_on_device() returns double, but the original stored it in a
 * float and compared with fabsf(), discarding precision before the check.
 * The GPU result is now kept in double and compared with fabs(). Also
 * checks malloc for failure. Note this function exits the process itself.
 */
void run_test(int num_elements)
{
    int array_mem_size = sizeof(float) * num_elements;

    /* Allocate memory on host to store input data */
    float *h_data = (float *)malloc(array_mem_size);
    if (h_data == NULL) {
        fprintf(stderr, "malloc failed\n");
        exit(EXIT_FAILURE);
    }

    /* Initialize input data to be floating-point values between [-.5, +.5] */
    srand(time(NULL));
    int i;
    for (i = 0; i < num_elements; i++)
        h_data[i] = rand() / (float)RAND_MAX - 0.5f;

    /* Calculate reference solution */
    printf("Reducing vector on CPU\n");
    double reference = compute_gold(h_data, num_elements);
    printf("Answer = %f\n", reference);

    /* Calculate solution on device, kept in double precision */
    printf("Reducing vector on GPU\n");
    double gpu_result = compute_on_device(h_data, num_elements);
    printf("Answer = %f\n", gpu_result);

    /* Check for correctness: relative error in double precision */
    double eps = 1e-6;
    if (fabs((reference - gpu_result) / reference) <= eps)
        printf("TEST PASSED\n");
    else
        printf("TEST FAILED\n");

    free(h_data);
    exit(EXIT_SUCCESS);
}
/* Reduce the vector on the device and return the result.
 * Launches kernel version 1 and then version 2 into the SAME result slot,
 * so the value copied back is the one produced by version 2.
 * Assumes num_elements <= 1024 (single block, one thread per element).
 * Fix: the error message after the device-to-host copy wrongly said
 * "host to device". */
double compute_on_device(float* h_data, int num_elements)
{
    float *d_data;    /* Device copy of the input array */
    double *d_result; /* Device slot holding the reduction result */
    int data_size = sizeof(float) * num_elements;

    /* Allocate memory on device for the array */
    hipMalloc((void**)&d_data, data_size);
    check_CUDA_error("Error allocating memory");

    /* Copy data from host memory to device memory */
    hipMemcpy(d_data, h_data, data_size, hipMemcpyHostToDevice);
    check_CUDA_error("Error copying host to device memory");

    /* Allocate memory on device to store the reduction result */
    hipMalloc((void **)&d_result, sizeof(double));
    check_CUDA_error("Error allocating memory");

    /* Set up execution grid and invoke kernels */
    dim3 threads(num_elements, 1, 1);
    dim3 grid(1, 1);

    printf("Using reduction kernel, version 1\n");
    hipLaunchKernelGGL(( vector_reduction_kernel_v1), dim3(grid), dim3(threads), 0, 0, d_data, d_result, num_elements);
    check_CUDA_error("Error in kernel");

    printf("Using reduction kernel, version 2\n");
    hipLaunchKernelGGL(( vector_reduction_kernel_v2), dim3(grid), dim3(threads), 0, 0, d_data, d_result, num_elements);
    check_CUDA_error("Error in kernel");

    /* Copy result from device to host; this blocking copy also
     * synchronizes with the kernels above */
    double h_result;
    hipMemcpy(&h_result, d_result, sizeof(double), hipMemcpyDeviceToHost);
    check_CUDA_error("Error copying device to host memory");

    /* Clean up device memory */
    hipFree(d_data);
    hipFree(d_result);
    check_CUDA_error("Error freeing memory");

    return h_result;
}
/* Abort the process with a diagnostic if the most recent runtime call
 * (or kernel launch) recorded an error; no-op otherwise. */
void check_CUDA_error(const char *msg)
{
    hipError_t status = hipGetLastError();
    if (status == hipSuccess)
        return;
    fprintf(stderr, "CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(status));
    exit(EXIT_FAILURE);
}
| c7a76e3a93de404eeb0f052fb2c58233524255b7.cu | /* Vector reduction example using shared memory.
* Works for small vectors that can be operated upon by a single thread block.
* Build as follows: make clean && make
* Execute as follows: ./vector_reduction
* Author: Naga Kandasamy
* Date modified: May 15, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#define NUM_ELEMENTS 1024
/* Include kernel */
#include "vector_reduction_kernel.cu"
void run_test(int);
extern "C" double compute_gold(float *, int);
double compute_on_device(float *, int);
void check_CUDA_error(const char *);
/* Entry point: validates the problem size (the single-block kernel supports
 * at most 1024 threads) and runs the CPU-vs-GPU reduction test. */
int main(int argc, char **argv)
{
    int num_elements = NUM_ELEMENTS;
    if (num_elements > 1024) {
        fprintf(stderr, "Input exceeds bounds\n");
        exit(EXIT_FAILURE);
    }
    run_test(num_elements);
    exit(EXIT_SUCCESS);
}
/* Generate a random input vector, reduce it on the CPU and on the GPU, and
 * compare the two results with a relative-error tolerance.
 *
 * Fix: compute_on_device() returns double, but the original stored it in a
 * float and compared with fabsf(), discarding precision before the check.
 * The GPU result is now kept in double and compared with fabs(). Also
 * checks malloc for failure. Note this function exits the process itself.
 */
void run_test(int num_elements)
{
    int array_mem_size = sizeof(float) * num_elements;

    /* Allocate memory on host to store input data */
    float *h_data = (float *)malloc(array_mem_size);
    if (h_data == NULL) {
        fprintf(stderr, "malloc failed\n");
        exit(EXIT_FAILURE);
    }

    /* Initialize input data to be floating-point values between [-.5, +.5] */
    srand(time(NULL));
    int i;
    for (i = 0; i < num_elements; i++)
        h_data[i] = rand() / (float)RAND_MAX - 0.5f;

    /* Calculate reference solution */
    printf("Reducing vector on CPU\n");
    double reference = compute_gold(h_data, num_elements);
    printf("Answer = %f\n", reference);

    /* Calculate solution on device, kept in double precision */
    printf("Reducing vector on GPU\n");
    double gpu_result = compute_on_device(h_data, num_elements);
    printf("Answer = %f\n", gpu_result);

    /* Check for correctness: relative error in double precision */
    double eps = 1e-6;
    if (fabs((reference - gpu_result) / reference) <= eps)
        printf("TEST PASSED\n");
    else
        printf("TEST FAILED\n");

    free(h_data);
    exit(EXIT_SUCCESS);
}
/* Reduce the vector on the device and return the result.
 * Launches kernel version 1 and then version 2 into the SAME result slot,
 * so the value copied back is the one produced by version 2.
 * Assumes num_elements <= 1024 (single block, one thread per element).
 * Fix: the error message after the device-to-host copy wrongly said
 * "host to device". */
double compute_on_device(float* h_data, int num_elements)
{
    float *d_data;    /* Device copy of the input array */
    double *d_result; /* Device slot holding the reduction result */
    int data_size = sizeof(float) * num_elements;

    /* Allocate memory on device for the array */
    cudaMalloc((void**)&d_data, data_size);
    check_CUDA_error("Error allocating memory");

    /* Copy data from host memory to device memory */
    cudaMemcpy(d_data, h_data, data_size, cudaMemcpyHostToDevice);
    check_CUDA_error("Error copying host to device memory");

    /* Allocate memory on device to store the reduction result */
    cudaMalloc((void **)&d_result, sizeof(double));
    check_CUDA_error("Error allocating memory");

    /* Set up execution grid and invoke kernels */
    dim3 threads(num_elements, 1, 1);
    dim3 grid(1, 1);

    printf("Using reduction kernel, version 1\n");
    vector_reduction_kernel_v1<<<grid, threads>>>(d_data, d_result, num_elements);
    check_CUDA_error("Error in kernel");

    printf("Using reduction kernel, version 2\n");
    vector_reduction_kernel_v2<<<grid, threads>>>(d_data, d_result, num_elements);
    check_CUDA_error("Error in kernel");

    /* Copy result from device to host; this blocking copy also
     * synchronizes with the kernels above */
    double h_result;
    cudaMemcpy(&h_result, d_result, sizeof(double), cudaMemcpyDeviceToHost);
    check_CUDA_error("Error copying device to host memory");

    /* Clean up device memory */
    cudaFree(d_data);
    cudaFree(d_result);
    check_CUDA_error("Error freeing memory");

    return h_result;
}
/* Abort the process with a diagnostic if the most recent runtime call
 * (or kernel launch) recorded an error; no-op otherwise. */
void check_CUDA_error(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
|
80b064307ad362af49be8bd0ebe5b30de4def169.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <tests/strings/utilities.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include <cstring>
#include <vector>
// Test fixture for the cudf strings-column factory tests (no extra state).
struct StringsFactoriesTest : public cudf::test::BaseFixture {
};
// Builds a strings column from an array of (char*, length) pairs pointing
// into a single device buffer (nullptr marks a null row), then verifies the
// column's type, null count, child layout, and the raw chars/offsets data.
TEST_F(StringsFactoriesTest, CreateColumnFromPair)
{
  std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog",
                                          "the fat cat lays next to the other accnted cat",
                                          "a slow moving turtl cannot catch the bird",
                                          "which can be composd together to form a more complete",
                                          "th result does not include the value in the sum in",
                                          "",
                                          nullptr,
                                          "absent stop words"};
  // Total bytes of character data (nulls contribute nothing).
  cudf::size_type memsize = 0;
  for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr)
    memsize += *itr ? (cudf::size_type)strlen(*itr) : 0;
  cudf::size_type count = (cudf::size_type)h_test_strings.size();
  thrust::host_vector<char> h_buffer(memsize);
  rmm::device_uvector<char> d_buffer(memsize, rmm::cuda_stream_default);
  thrust::host_vector<thrust::pair<const char*, cudf::size_type>> strings(count);
  thrust::host_vector<cudf::size_type> h_offsets(count + 1);
  cudf::size_type offset = 0;
  cudf::size_type nulls = 0;
  h_offsets[0] = 0;
  // Pack all strings into one flat buffer; each pair points at its slice of
  // the *device* buffer (filled below), with {nullptr, 0} for null rows.
  for (cudf::size_type idx = 0; idx < count; ++idx) {
    const char* str = h_test_strings[idx];
    if (!str) {
      strings[idx] = thrust::pair<const char*, cudf::size_type>{nullptr, 0};
      nulls++;
    } else {
      auto length = (cudf::size_type)strlen(str);
      memcpy(h_buffer.data() + offset, str, length);
      strings[idx] = thrust::pair<const char*, cudf::size_type>{d_buffer.data() + offset, length};
      offset += length;
    }
    h_offsets[idx + 1] = offset;
  }
  auto d_strings = cudf::detail::make_device_uvector_sync(strings);
  CUDA_TRY(hipMemcpy(d_buffer.data(), h_buffer.data(), memsize, hipMemcpyHostToDevice));
  auto column = cudf::make_strings_column(d_strings);
  // Column metadata checks.
  EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING});
  EXPECT_EQ(column->null_count(), nulls);
  if (nulls) {
    EXPECT_TRUE(column->nullable());
    EXPECT_TRUE(column->has_nulls());
  }
  EXPECT_EQ(2, column->num_children());
  cudf::strings_column_view strings_view(column->view());
  EXPECT_EQ(strings_view.size(), count);
  EXPECT_EQ(strings_view.offsets().size(), count + 1);
  EXPECT_EQ(strings_view.chars().size(), memsize);
  // check string data: copy the chars and offsets children back to the host
  // and compare byte-for-byte against the buffers built above.
  auto h_chars_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<char const>(strings_view.chars().data<char>(), strings_view.chars().size()),
    rmm::cuda_stream_default);
  auto h_offsets_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<cudf::offset_type const>(
      strings_view.offsets().data<cudf::offset_type>() + strings_view.offset(),
      strings_view.size() + 1),
    rmm::cuda_stream_default);
  EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0);
  EXPECT_EQ(
    memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0);
}
// Builds a strings column from separate chars/offsets/null-mask device
// buffers and verifies the resulting column's metadata and raw data.
// NOTE(review): the null mask is built by left-shifting a single word, which
// only works because there are exactly 8 test strings — TODO confirm if the
// list grows past 32 entries.
TEST_F(StringsFactoriesTest, CreateColumnFromOffsets)
{
  std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog",
                                          "the fat cat lays next to the other accnted cat",
                                          "a slow moving turtl cannot catch the bird",
                                          "which can be composd together to form a more complete",
                                          "th result does not include the value in the sum in",
                                          "",
                                          nullptr,
                                          "absent stop words"};
  // Total bytes of character data (nulls contribute nothing).
  cudf::size_type memsize = 0;
  for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr)
    memsize += *itr ? (cudf::size_type)strlen(*itr) : 0;
  cudf::size_type count = (cudf::size_type)h_test_strings.size();
  std::vector<char> h_buffer(memsize);
  std::vector<cudf::size_type> h_offsets(count + 1);
  cudf::size_type offset = 0;
  h_offsets[0] = offset;
  cudf::bitmask_type h_null_mask = 0;
  cudf::size_type null_count = 0;
  // Pack the strings into one flat buffer, recording end offsets and a
  // validity bit per row (1 = valid).
  for (cudf::size_type idx = 0; idx < count; ++idx) {
    h_null_mask = (h_null_mask << 1);
    const char* str = h_test_strings[idx];
    if (str) {
      auto length = (cudf::size_type)strlen(str);
      memcpy(h_buffer.data() + offset, str, length);
      offset += length;
      h_null_mask |= 1;
    } else
      null_count++;
    h_offsets[idx + 1] = offset;
  }
  std::vector<cudf::bitmask_type> h_nulls{h_null_mask};
  auto d_buffer  = cudf::detail::make_device_uvector_sync(h_buffer);
  auto d_offsets = cudf::detail::make_device_uvector_sync(h_offsets);
  auto d_nulls   = cudf::detail::make_device_uvector_sync(h_nulls);
  auto column = cudf::make_strings_column(d_buffer, d_offsets, d_nulls, null_count);
  // Column metadata checks.
  EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING});
  EXPECT_EQ(column->null_count(), null_count);
  EXPECT_EQ(2, column->num_children());
  cudf::strings_column_view strings_view(column->view());
  EXPECT_EQ(strings_view.size(), count);
  EXPECT_EQ(strings_view.offsets().size(), count + 1);
  EXPECT_EQ(strings_view.chars().size(), memsize);
  // check string data: copy the chars and offsets children back to the host
  // and compare byte-for-byte against the buffers built above.
  auto h_chars_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<char const>(strings_view.chars().data<char>(), strings_view.chars().size()),
    rmm::cuda_stream_default);
  auto h_offsets_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<cudf::offset_type const>(
      strings_view.offsets().data<cudf::offset_type>() + strings_view.offset(),
      strings_view.size() + 1),
    rmm::cuda_stream_default);
  EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0);
  EXPECT_EQ(
    memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0);
}
// Build a string scalar from std::string and verify the round-trip value
// and the validity flags on both the typed and the base scalar handle.
TEST_F(StringsFactoriesTest, CreateScalar)
{
  std::string input = "test string";
  auto scalar_base = cudf::make_string_scalar(input);
  auto typed_scalar = static_cast<cudf::string_scalar*>(scalar_base.get());
  EXPECT_EQ(typed_scalar->to_string(), input);
  EXPECT_TRUE(typed_scalar->is_valid());
  EXPECT_TRUE(scalar_base->is_valid());
}
// Building a strings column from empty device buffers — and from an empty
// pair array — must yield a valid, empty strings column.
TEST_F(StringsFactoriesTest, EmptyStringsColumn)
{
  rmm::device_uvector<char> d_chars{0, rmm::cuda_stream_default};
  // Even a zero-row column carries one offsets entry (the leading 0).
  auto d_offsets = cudf::detail::make_zeroed_device_uvector_sync<cudf::size_type>(1);
  rmm::device_uvector<cudf::bitmask_type> d_nulls{0, rmm::cuda_stream_default};
  auto results = cudf::make_strings_column(d_chars, d_offsets, d_nulls, 0);
  cudf::test::expect_strings_empty(results->view());
  // Same expectation via the (char*, length) pair factory overload.
  rmm::device_uvector<thrust::pair<const char*, cudf::size_type>> d_strings{
    0, rmm::cuda_stream_default};
  results = cudf::make_strings_column(d_strings);
  cudf::test::expect_strings_empty(results->view());
}
namespace {
using string_pair = thrust::pair<char const*, cudf::size_type>;
// Device functor converting a (string_view, validity) pair into the
// (char*, byte-length) pair format accepted by cudf::make_strings_column;
// invalid (null) entries map to {nullptr, 0}.
struct string_view_to_pair {
  __device__ string_pair operator()(thrust::pair<cudf::string_view, bool> const& p)
  {
    return (p.second) ? string_pair{p.first.data(), p.first.size_bytes()} : string_pair{nullptr, 0};
  }
};
}  // namespace
// Round-trip test: convert an existing strings column (containing nulls and
// empty strings) into (char*, length) pairs on the device, rebuild a column
// from those pairs, and expect it to be equivalent to the original.
TEST_F(StringsFactoriesTest, StringPairWithNullsAndEmpty)
{
  cudf::test::strings_column_wrapper data(
    {"", "this", "is", "", "a", "", "column", "of", "strings", "", ""},
    {0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1});
  auto d_column = cudf::column_device_view::create(data);
  rmm::device_uvector<string_pair> pairs(d_column->size(), rmm::cuda_stream_default);
  // pair_begin<.., true> iterates (string_view, validity) pairs including nulls.
  thrust::transform(thrust::device,
                    d_column->pair_begin<cudf::string_view, true>(),
                    d_column->pair_end<cudf::string_view, true>(),
                    pairs.data(),
                    string_view_to_pair{});
  auto result = cudf::make_strings_column(pairs);
  CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(result->view(), data);
}
| 80b064307ad362af49be8bd0ebe5b30de4def169.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <tests/strings/utilities.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include <cstring>
#include <vector>
// Test fixture for the cudf strings-column factory tests (no extra state).
struct StringsFactoriesTest : public cudf::test::BaseFixture {
};
// Builds a strings column from an array of (char*, length) pairs pointing
// into a single device buffer (nullptr marks a null row), then verifies the
// column's type, null count, child layout, and the raw chars/offsets data.
TEST_F(StringsFactoriesTest, CreateColumnFromPair)
{
  std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog",
                                          "the fat cat lays next to the other accénted cat",
                                          "a slow moving turtlé cannot catch the bird",
                                          "which can be composéd together to form a more complete",
                                          "thé result does not include the value in the sum in",
                                          "",
                                          nullptr,
                                          "absent stop words"};
  // Total bytes of character data (nulls contribute nothing).
  cudf::size_type memsize = 0;
  for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr)
    memsize += *itr ? (cudf::size_type)strlen(*itr) : 0;
  cudf::size_type count = (cudf::size_type)h_test_strings.size();
  thrust::host_vector<char> h_buffer(memsize);
  rmm::device_uvector<char> d_buffer(memsize, rmm::cuda_stream_default);
  thrust::host_vector<thrust::pair<const char*, cudf::size_type>> strings(count);
  thrust::host_vector<cudf::size_type> h_offsets(count + 1);
  cudf::size_type offset = 0;
  cudf::size_type nulls = 0;
  h_offsets[0] = 0;
  // Pack all strings into one flat buffer; each pair points at its slice of
  // the *device* buffer (filled below), with {nullptr, 0} for null rows.
  for (cudf::size_type idx = 0; idx < count; ++idx) {
    const char* str = h_test_strings[idx];
    if (!str) {
      strings[idx] = thrust::pair<const char*, cudf::size_type>{nullptr, 0};
      nulls++;
    } else {
      auto length = (cudf::size_type)strlen(str);
      memcpy(h_buffer.data() + offset, str, length);
      strings[idx] = thrust::pair<const char*, cudf::size_type>{d_buffer.data() + offset, length};
      offset += length;
    }
    h_offsets[idx + 1] = offset;
  }
  auto d_strings = cudf::detail::make_device_uvector_sync(strings);
  CUDA_TRY(cudaMemcpy(d_buffer.data(), h_buffer.data(), memsize, cudaMemcpyHostToDevice));
  auto column = cudf::make_strings_column(d_strings);
  // Column metadata checks.
  EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING});
  EXPECT_EQ(column->null_count(), nulls);
  if (nulls) {
    EXPECT_TRUE(column->nullable());
    EXPECT_TRUE(column->has_nulls());
  }
  EXPECT_EQ(2, column->num_children());
  cudf::strings_column_view strings_view(column->view());
  EXPECT_EQ(strings_view.size(), count);
  EXPECT_EQ(strings_view.offsets().size(), count + 1);
  EXPECT_EQ(strings_view.chars().size(), memsize);
  // check string data: copy the chars and offsets children back to the host
  // and compare byte-for-byte against the buffers built above.
  auto h_chars_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<char const>(strings_view.chars().data<char>(), strings_view.chars().size()),
    rmm::cuda_stream_default);
  auto h_offsets_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<cudf::offset_type const>(
      strings_view.offsets().data<cudf::offset_type>() + strings_view.offset(),
      strings_view.size() + 1),
    rmm::cuda_stream_default);
  EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0);
  EXPECT_EQ(
    memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0);
}
// Build a strings column from raw (chars, offsets, null mask) device buffers
// and verify its contents. count <= 32 so a single bitmask word suffices.
TEST_F(StringsFactoriesTest, CreateColumnFromOffsets)
{
  std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog",
                                          "the fat cat lays next to the other accénted cat",
                                          "a slow moving turtlé cannot catch the bird",
                                          "which can be composéd together to form a more complete",
                                          "thé result does not include the value in the sum in",
                                          "",
                                          nullptr,
                                          "absent stop words"};
  cudf::size_type memsize = 0;
  for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr)
    memsize += *itr ? (cudf::size_type)strlen(*itr) : 0;
  cudf::size_type count = (cudf::size_type)h_test_strings.size();
  std::vector<char> h_buffer(memsize);
  std::vector<cudf::size_type> h_offsets(count + 1);
  cudf::size_type offset = 0;
  h_offsets[0] = offset;
  cudf::bitmask_type h_null_mask = 0;
  cudf::size_type null_count = 0;
  for (cudf::size_type idx = 0; idx < count; ++idx) {
    // NOTE(review): shift-then-or builds the mask MSB-first, so row idx lands
    // at bit (count - 1 - idx), while cudf reads validity LSB-first (bit i =
    // row i). Only the null *count* is asserted below, so the test passes
    // either way — confirm whether the bit order is intentional.
    h_null_mask = (h_null_mask << 1);
    const char* str = h_test_strings[idx];
    if (str) {
      auto length = (cudf::size_type)strlen(str);
      memcpy(h_buffer.data() + offset, str, length);
      offset += length;
      h_null_mask |= 1;
    } else
      null_count++;
    h_offsets[idx + 1] = offset;
  }
  std::vector<cudf::bitmask_type> h_nulls{h_null_mask};
  auto d_buffer = cudf::detail::make_device_uvector_sync(h_buffer);
  auto d_offsets = cudf::detail::make_device_uvector_sync(h_offsets);
  auto d_nulls = cudf::detail::make_device_uvector_sync(h_nulls);
  auto column = cudf::make_strings_column(d_buffer, d_offsets, d_nulls, null_count);
  EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING});
  EXPECT_EQ(column->null_count(), null_count);
  EXPECT_EQ(2, column->num_children());
  cudf::strings_column_view strings_view(column->view());
  EXPECT_EQ(strings_view.size(), count);
  EXPECT_EQ(strings_view.offsets().size(), count + 1);
  EXPECT_EQ(strings_view.chars().size(), memsize);
  // check string data: round-trip chars and offsets back to the host and
  // compare against the arrays built above.
  auto h_chars_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<char const>(strings_view.chars().data<char>(), strings_view.chars().size()),
    rmm::cuda_stream_default);
  auto h_offsets_data = cudf::detail::make_std_vector_sync(
    cudf::device_span<cudf::offset_type const>(
      strings_view.offsets().data<cudf::offset_type>() + strings_view.offset(),
      strings_view.size() + 1),
    rmm::cuda_stream_default);
  EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0);
  EXPECT_EQ(
    memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0);
}
// Verify make_string_scalar produces a valid string scalar holding the input.
TEST_F(StringsFactoriesTest, CreateScalar)
{
  std::string const input{"test string"};
  auto scalar       = cudf::make_string_scalar(input);
  auto typed_scalar = static_cast<cudf::string_scalar*>(scalar.get());
  EXPECT_EQ(typed_scalar->to_string(), input);
  EXPECT_TRUE(typed_scalar->is_valid());
  EXPECT_TRUE(scalar->is_valid());
}
// Verify both factory overloads handle zero-length inputs and yield a valid
// empty strings column.
TEST_F(StringsFactoriesTest, EmptyStringsColumn)
{
  rmm::device_uvector<char> d_chars{0, rmm::cuda_stream_default};
  // Even an empty column carries a single 0 entry in its offsets child.
  auto d_offsets = cudf::detail::make_zeroed_device_uvector_sync<cudf::size_type>(1);
  rmm::device_uvector<cudf::bitmask_type> d_nulls{0, rmm::cuda_stream_default};
  auto results = cudf::make_strings_column(d_chars, d_offsets, d_nulls, 0);
  cudf::test::expect_strings_empty(results->view());
  // The (pointer, length)-pair overload with zero pairs must also be empty.
  rmm::device_uvector<thrust::pair<const char*, cudf::size_type>> d_strings{
    0, rmm::cuda_stream_default};
  results = cudf::make_strings_column(d_strings);
  cudf::test::expect_strings_empty(results->view());
}
namespace {
using string_pair = thrust::pair<char const*, cudf::size_type>;
// Converts a (string_view, valid) pair from a column's pair iterator into the
// (char pointer, byte length) format accepted by make_strings_column.
// Invalid rows map to {nullptr, 0}, which the factory treats as null.
struct string_view_to_pair {
  __device__ string_pair operator()(thrust::pair<cudf::string_view, bool> const& p)
  {
    return (p.second) ? string_pair{p.first.data(), p.first.size_bytes()} : string_pair{nullptr, 0};
  }
};
} // namespace
// Round-trip: a strings column containing nulls and empty strings is converted
// to (pointer, length) pairs and back; the rebuilt column must be equivalent.
TEST_F(StringsFactoriesTest, StringPairWithNullsAndEmpty)
{
  cudf::test::strings_column_wrapper data(
    {"", "this", "is", "", "a", "", "column", "of", "strings", "", ""},
    {0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1});
  auto d_column = cudf::column_device_view::create(data);
  rmm::device_uvector<string_pair> pairs(d_column->size(), rmm::cuda_stream_default);
  thrust::transform(thrust::device,
                    d_column->pair_begin<cudf::string_view, true>(),
                    d_column->pair_end<cudf::string_view, true>(),
                    pairs.data(),
                    string_view_to_pair{});
  auto result = cudf::make_strings_column(pairs);
  CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(result->view(), data);
}
|
f3f39a57db42c43b5d04e42de441368f54631303.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2021 by Contributors
* \file graph/sampling/frequency_hashmap.cu
* \brief frequency hashmap - used to select top-k frequency edges of each node
*/
#include <algorithm>
#include <tuple>
#include <utility>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../../array/cuda/atomic.cuh"
#include "../../../array/cuda/dgl_cub.cuh"
#include "frequency_hashmap.cuh"
namespace dgl {
namespace sampling {
namespace impl {
namespace {
int64_t _table_size(const int64_t num, const int64_t scale) {
  /**
   * Calculate the number of buckets in the hashtable. To guarantee we can
   * fill the hashtable in the worst case, we must use a number of buckets which
   * is a power of two.
   * https://en.wikipedia.org/wiki/Quadratic_probing#Limitations
   */
  // Guard small inputs: (num >> 1) == 0 would feed std::log2(0) (== -inf,
  // undefined when converted to an integer).
  if (num <= 1) {
    return int64_t{1} << scale;
  }
  // Use a 64-bit literal: the original `1 << n` shifted a 32-bit int and
  // overflowed for num >= 2^31.
  const int64_t next_pow2 =
      int64_t{1} << static_cast<int64_t>(1 + std::log2(num >> 1));
  return next_pow2 << scale;
}
// Kernel: initialize every bucket of the edge hashmap to the empty state
// (src == -1 sentinel, count == 0). Each block covers TILE_SIZE entries.
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _init_edge_table(void *edge_hashmap, int64_t edges_len) {
  using EdgeItem = typename DeviceEdgeHashmap<IdxType>::EdgeItem;
  auto edge_hashmap_t = static_cast<EdgeItem*>(edge_hashmap);
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
#pragma unroll(4)
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    if (idx < edges_len) {
      EdgeItem *edge = (edge_hashmap_t + idx);
      edge->src = static_cast<IdxType>(-1);  // -1 marks an unused bucket
      edge->cnt = static_cast<IdxType>(0);
    }
  }
}
// Kernel: insert every edge into the per-destination hashmap; write each
// block's count of newly created unique edges to edge_blocks_prefix (scanned
// later) and mark the creating occurrence in is_first_position.
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _count_frequency(const IdxType *src_data,
    const int64_t num_edges, const int64_t num_edges_per_node,
    IdxType *edge_blocks_prefix, bool *is_first_position,
    DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
  IdxType count = 0;
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    if (idx < num_edges) {
      IdxType src = src_data[idx];
      // -1 entries are padding; skip them
      if (src == static_cast<IdxType>(-1)) {
        continue;
      }
      IdxType dst_idx = (idx / num_edges_per_node);
      // InsertEdge returns the previous count; 0 => this thread created the entry
      if (device_edge_hashmap.InsertEdge(src, dst_idx) == 0) {
        is_first_position[idx] = true;
        ++count;
      }
    }
  }
  // Every thread reaches here (`continue` only skips loop work), so the
  // block-wide reduction below is a valid collective call.
  using BlockReduce = typename hipcub::BlockReduce<IdxType, BLOCK_SIZE>;
  __shared__ typename BlockReduce::TempStorage temp_space;
  count = BlockReduce(temp_space).Sum(count);
  if (threadIdx.x == 0) {
    edge_blocks_prefix[blockIdx.x] = count;
    if (blockIdx.x == 0) {
      // extra slot so a later ExclusiveSum yields the grand total
      edge_blocks_prefix[gridDim.x] = 0;
    }
  }
}
/**
* This structure is used with cub's block-level prefixscan in order to
* keep a running sum as items are iteratively processed.
*/
template <typename T>
struct BlockPrefixCallbackOp {
  T _running_total;  // sum of all tiles scanned so far
  __device__ BlockPrefixCallbackOp(const T running_total)
    : _running_total(running_total) {}
  // Invoked by cub after each tile scan: returns the prefix for this tile and
  // folds the tile's aggregate into the running total.
  __device__ T operator()(const T block_aggregate) {
    const T old_prefix = _running_total;
    _running_total += block_aggregate;
    return old_prefix;
  }
};
// Kernel: compact the hashmap contents into dense arrays. For each edge
// occurrence flagged as "first position", write its src to unique_src_edges
// and its count to unique_frequency at a stable position derived from the
// per-block prefix plus an intra-block exclusive scan. Also records each
// node's unique-edge count the first time that node's segment is visited.
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _compact_frequency(const IdxType *src_data, const IdxType *dst_data,
    const int64_t num_edges, const int64_t num_edges_per_node,
    const IdxType *edge_blocks_prefix, const bool *is_first_position,
    IdxType *num_unique_each_node,
    IdxType *unique_src_edges, Idx64Type *unique_frequency,
    DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
  const IdxType block_offset = edge_blocks_prefix[blockIdx.x];
  using BlockScan = typename hipcub::BlockScan<IdxType, BLOCK_SIZE>;
  __shared__ typename BlockScan::TempStorage temp_space;
  BlockPrefixCallbackOp<IdxType> prefix_op(0);
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    const bool in_range = (idx < num_edges);
    IdxType flag = 0;
    if (in_range) {
      IdxType dst_idx = (idx / num_edges_per_node);
      if (idx % num_edges_per_node == 0) {
        num_unique_each_node[dst_idx] = device_edge_hashmap.GetDstCount(dst_idx);
      }
      if (is_first_position[idx] == true) {
        flag = 1;
      }
    }
    // BUGFIX: the block-wide scan and barrier are collectives that must be
    // executed by every thread of the block; the original ran them inside the
    // bounds check, which is undefined for a partially out-of-range block.
    BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
    __syncthreads();
    if (in_range && is_first_position[idx] == true) {
      IdxType src = src_data[idx];
      IdxType dst_idx = (idx / num_edges_per_node);
      const IdxType pos = (block_offset + flag);
      unique_src_edges[pos] = src;
      if (sizeof(IdxType) != sizeof(Idx64Type)
          && sizeof(IdxType) == 4) {  // if IdxType is a 32-bit data
        // Pack (reverse node order | count) so a single descending radix sort
        // groups by node first, then orders by frequency within each node.
        unique_frequency[pos] = (
          (static_cast<Idx64Type>(num_edges / num_edges_per_node - dst_idx) << 32)
          | device_edge_hashmap.GetEdgeCount(src, dst_idx));
      } else {
        unique_frequency[pos] = device_edge_hashmap.GetEdgeCount(src, dst_idx);
      }
    }
  }
}
// Kernel: clamp each destination node's unique-edge tally to at most
// `num_pick`, producing the final per-node output counts.
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _get_pick_num(IdxType *num_unique_each_node,
    const int64_t num_pick, const int64_t num_dst_nodes) {
  const int64_t first_node = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  const int64_t node_bound = first_node + TILE_SIZE;
#pragma unroll(4)
  for (int64_t node = first_node; node < node_bound; node += BLOCK_SIZE) {
    if (node >= num_dst_nodes) {
      continue;
    }
    const IdxType cap = static_cast<IdxType>(num_pick);
    IdxType &tally = num_unique_each_node[node];
    tally = min(tally, cap);
  }
}
// Kernel: one thread per destination node; gathers that node's top picks from
// the sorted input range [unique_input_offsets[i], unique_input_offsets[i+1])
// into the compacted output range given by unique_output_offsets.
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _pick_data(const Idx64Type *unique_frequency, const IdxType *unique_src_edges,
    const IdxType *unique_input_offsets, const IdxType *dst_data,
    const int64_t num_edges_per_node, const int64_t num_dst_nodes,
    const int64_t num_edges,
    const IdxType *unique_output_offsets,
    IdxType *output_src, IdxType *output_dst, IdxType *output_frequency) {
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    if (idx < num_dst_nodes) {
      // dst id is read from the first sampled edge of this node's segment
      const int64_t dst_pos = (idx * num_edges_per_node);
      assert(dst_pos < num_edges);
      const IdxType dst = dst_data[dst_pos];
      const IdxType last_output_offset = unique_output_offsets[idx + 1];
      // output range must never exceed the available unique-input range
      assert((last_output_offset - unique_output_offsets[idx]) <=
             (unique_input_offsets[idx + 1] - unique_input_offsets[idx]));
      for (IdxType output_idx = unique_output_offsets[idx], input_idx = unique_input_offsets[idx];
           output_idx < last_output_offset; ++output_idx, ++input_idx) {
        output_src[output_idx] = unique_src_edges[input_idx];
        output_dst[output_idx] = dst;
        // for 32-bit ids the high word of unique_frequency holds a node-order
        // key (see _compact_frequency); the narrowing cast keeps only the count
        output_frequency[output_idx] = static_cast<IdxType>(unique_frequency[input_idx]);
      }
    }
  }
}
} // namespace
// Insert (src, dst_idx) into dst_idx's slice of the hashmap and bump its
// counter, resolving collisions with quadratic probing.
// Returns the edge's count *before* this insertion (0 => first occurrence).
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::InsertEdge(
    const IdxType &src, const IdxType &dst_idx) {
  IdxType start_off = dst_idx * _num_items_each_dst;  // this dst's probe region
  IdxType pos = EdgeHash(src);
  IdxType delta = 1;
  IdxType old_cnt = static_cast<IdxType>(-1);
  while (true) {
    // Claim the bucket if empty; AtomicCAS returns the previous src value.
    IdxType old_src = dgl::aten::cuda::AtomicCAS(
      &_edge_hashmap[start_off + pos].src, static_cast<IdxType>(-1), src);
    if (old_src == static_cast<IdxType>(-1) || old_src == src) {
      // first insert
      old_cnt = dgl::aten::cuda::AtomicAdd(
        &_edge_hashmap[start_off + pos].cnt, static_cast<IdxType>(1));
      if (old_src == static_cast<IdxType>(-1)) {
        assert(dst_idx < _num_dst);
        // brand-new unique edge for this destination node
        dgl::aten::cuda::AtomicAdd(&_dst_unique_edges[dst_idx], static_cast<IdxType>(1));
      }
      break;
    }
    pos = EdgeHash(pos + delta);
    delta += 1;
  }
  return old_cnt;
}
// Return the number of unique source edges recorded for destination dst_idx.
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetDstCount(const IdxType &dst_idx) {
  return _dst_unique_edges[dst_idx];
}
// Look up the occurrence count of (src, dst_idx).
// Precondition: the edge was previously inserted — the probe loop does not
// check for empty buckets, so querying an absent edge would never terminate.
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetEdgeCount(
    const IdxType &src, const IdxType &dst_idx) {
  IdxType start_off = dst_idx * _num_items_each_dst;
  IdxType pos = EdgeHash(src);
  IdxType delta = 1;
  while (_edge_hashmap[start_off + pos].src != src) {
    pos = EdgeHash(pos + delta);
    delta += 1;
  }
  return _edge_hashmap[start_off + pos].cnt;
}
// Construct a FrequencyHashmap for `num_dst` destination nodes. Each node owns
// a power-of-two probe region sized from num_items_each_dst (scaled up by
// edge_table_scale to keep the load factor low). Allocates device workspaces
// for the per-dst counters and the edge table, then empty-initializes them.
template <typename IdxType>
FrequencyHashmap<IdxType>::FrequencyHashmap(
    int64_t num_dst, int64_t num_items_each_dst, DGLContext ctx,
    hipStream_t stream, int64_t edge_table_scale) {
  _ctx = ctx;
  _stream = stream;
  num_items_each_dst = _table_size(num_items_each_dst, edge_table_scale);
  auto device = dgl::runtime::DeviceAPI::Get(_ctx);
  auto dst_unique_edges = static_cast<IdxType*>(
    device->AllocWorkspace(_ctx, (num_dst) * sizeof(IdxType)));
  auto edge_hashmap = static_cast<EdgeItem*>(
    device->AllocWorkspace(_ctx, (num_dst * num_items_each_dst) * sizeof(EdgeItem)));
  constexpr int BLOCK_SIZE = 256;
  constexpr int TILE_SIZE = BLOCK_SIZE * 8;
  dim3 block(BLOCK_SIZE);
  dim3 grid((num_dst * num_items_each_dst + TILE_SIZE - 1) / TILE_SIZE);
  CUDA_CALL(hipMemset(dst_unique_edges, 0, (num_dst) * sizeof(IdxType)));
  CUDA_KERNEL_CALL((_init_edge_table<IdxType, BLOCK_SIZE, TILE_SIZE>),
      grid, block, 0, _stream,
      edge_hashmap, (num_dst * num_items_each_dst));
  // _device_edge_hashmap is a small host-side handle holding device pointers.
  _device_edge_hashmap = new DeviceEdgeHashmap<IdxType>(
    num_dst, num_items_each_dst, dst_unique_edges, edge_hashmap);
  _dst_unique_edges = dst_unique_edges;
  _edge_hashmap = edge_hashmap;
}
// Release the device workspaces and the host-side device-hashmap handle.
template <typename IdxType>
FrequencyHashmap<IdxType>::~FrequencyHashmap() {
  auto device = dgl::runtime::DeviceAPI::Get(_ctx);
  delete _device_edge_hashmap;
  _device_edge_hashmap = nullptr;
  device->FreeWorkspace(_ctx, _dst_unique_edges);
  _dst_unique_edges = nullptr;
  device->FreeWorkspace(_ctx, _edge_hashmap);
  _edge_hashmap = nullptr;
}
// Select, for every destination node, the `num_pick` most frequent source
// edges among its sampled neighbors. src_data/dst_data hold
// num_edges_per_node sampled edges per destination (src == -1 is padding).
// Returns (srcs, dsts, frequencies) of the picked edges.
// Pipeline: count uniques per block -> compact (src, freq) pairs -> sort by
// frequency within each node -> clamp counts to num_pick -> gather output.
template <typename IdxType>
std::tuple<IdArray, IdArray, IdArray> FrequencyHashmap<IdxType>::Topk(
    const IdxType *src_data, const IdxType *dst_data, DLDataType dtype,
    const int64_t num_edges, const int64_t num_edges_per_node,
    const int64_t num_pick) {
  using Idx64Type = int64_t;
  const int64_t num_dst_nodes = (num_edges / num_edges_per_node);
  constexpr int BLOCK_SIZE = 256;
  // XXX: an empirically chosen value; best performance on GV100
  constexpr int TILE_SIZE = BLOCK_SIZE * 32;
  const dim3 block(BLOCK_SIZE);
  const dim3 edges_grid((num_edges + TILE_SIZE - 1) / TILE_SIZE);
  auto device = dgl::runtime::DeviceAPI::Get(_ctx);
  const IdxType num_edge_blocks = static_cast<IdxType>(edges_grid.x);
  IdxType num_unique_edges = 0;
  // to mark if this position of edges is the first inserting position for _edge_hashmap
  bool *is_first_position = static_cast<bool*>(
    device->AllocWorkspace(_ctx, sizeof(bool) * (num_edges)));
  CUDA_CALL(hipMemset(is_first_position, 0, sizeof(bool) * (num_edges)));
  // double space to use ExclusiveSum
  auto edge_blocks_prefix_data = static_cast<IdxType*>(
    device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_edge_blocks + 1)));
  IdxType *edge_blocks_prefix = edge_blocks_prefix_data;
  IdxType *edge_blocks_prefix_alternate = (edge_blocks_prefix_data + (num_edge_blocks + 1));
  // triple space to use ExclusiveSum and unique_output_offsets
  auto num_unique_each_node_data = static_cast<IdxType*>(
    device->AllocWorkspace(_ctx, 3 * sizeof(IdxType) * (num_dst_nodes + 1)));
  IdxType *num_unique_each_node = num_unique_each_node_data;
  IdxType *num_unique_each_node_alternate = (num_unique_each_node_data + (num_dst_nodes + 1));
  IdxType *unique_output_offsets = (num_unique_each_node_data + 2 * (num_dst_nodes + 1));
  // 1. Scan the all edges and count the unique edges and unique edges for each dst node
  CUDA_KERNEL_CALL((_count_frequency<IdxType, BLOCK_SIZE, TILE_SIZE>),
      edges_grid, block, 0, _stream,
      src_data, num_edges, num_edges_per_node,
      edge_blocks_prefix, is_first_position, *_device_edge_hashmap);
  // 2. Compact the unique edges frequency
  // 2.1 ExclusiveSum the edge_blocks_prefix
  // (cub two-phase pattern: first call only sizes temp storage, second runs)
  void *d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
  device->FreeWorkspace(_ctx, d_temp_storage);
  std::swap(edge_blocks_prefix, edge_blocks_prefix_alternate);
  // the last scan slot holds the total number of unique edges; copy it to the
  // host (the StreamSync below makes the value safe to read)
  device->CopyDataFromTo(&edge_blocks_prefix[num_edge_blocks], 0, &num_unique_edges, 0,
      sizeof(num_unique_edges),
      _ctx, DGLContext{kDLCPU, 0},
      dtype, _stream);
  device->StreamSync(_ctx, _stream);
  // 2.2 Allocate the data of unique edges and frequency
  // double space to use SegmentedRadixSort
  auto unique_src_edges_data = static_cast<IdxType*>(
    device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_unique_edges)));
  IdxType *unique_src_edges = unique_src_edges_data;
  IdxType *unique_src_edges_alternate = unique_src_edges_data + num_unique_edges;
  // double space to use SegmentedRadixSort
  auto unique_frequency_data = static_cast<Idx64Type*>(
    device->AllocWorkspace(_ctx, 2 * sizeof(Idx64Type) * (num_unique_edges)));
  Idx64Type *unique_frequency = unique_frequency_data;
  Idx64Type *unique_frequency_alternate = unique_frequency_data + num_unique_edges;
  // 2.3 Compact the unique edges and their frequency
  CUDA_KERNEL_CALL((_compact_frequency<IdxType, Idx64Type, BLOCK_SIZE, TILE_SIZE>),
      edges_grid, block, 0, _stream,
      src_data, dst_data, num_edges, num_edges_per_node,
      edge_blocks_prefix, is_first_position, num_unique_each_node,
      unique_src_edges, unique_frequency, *_device_edge_hashmap);
  // 3. SegmentedRadixSort the unique edges and unique_frequency
  // 3.1 ExclusiveSum the num_unique_each_node
  d_temp_storage = nullptr;
  temp_storage_bytes = 0;
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
  device->FreeWorkspace(_ctx, d_temp_storage);
  // 3.2 SegmentedRadixSort the unique_src_edges and unique_frequency
  // Create a set of DoubleBuffers to wrap pairs of device pointers
  cub::DoubleBuffer<Idx64Type> d_unique_frequency(unique_frequency, unique_frequency_alternate);
  cub::DoubleBuffer<IdxType> d_unique_src_edges(unique_src_edges, unique_src_edges_alternate);
  // Determine temporary device storage requirements
  d_temp_storage = nullptr;
  temp_storage_bytes = 0;
  // the DeviceRadixSort is faster than DeviceSegmentedRadixSort,
  // especially when num_dst_nodes is large (about ~10000)
  // For 32-bit ids, _compact_frequency packed the node order into the key's
  // high word, so one global descending sort groups per node and orders by
  // frequency; 64-bit ids need an explicit segmented sort.
  if (dtype.bits == 32) {
    CUDA_CALL(hipcub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges));
  } else {
    CUDA_CALL(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
        num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
  }
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  if (dtype.bits == 32) {
    CUDA_CALL(hipcub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges));
  } else {
    CUDA_CALL(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
        num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
  }
  device->FreeWorkspace(_ctx, d_temp_storage);
  // 4. Get the final pick number for each dst node
  // 4.1 Reset the min(num_pick, num_unique_each_node) to num_unique_each_node
  constexpr int NODE_TILE_SIZE = BLOCK_SIZE * 2;
  const dim3 nodes_grid((num_dst_nodes + NODE_TILE_SIZE - 1) / NODE_TILE_SIZE);
  CUDA_KERNEL_CALL((_get_pick_num<IdxType, BLOCK_SIZE, NODE_TILE_SIZE>),
      nodes_grid, block, 0, _stream,
      num_unique_each_node, num_pick, num_dst_nodes);
  // 4.2 ExclusiveSum the new num_unique_each_node as unique_output_offsets
  // use unique_output_offsets;
  d_temp_storage = nullptr;
  temp_storage_bytes = 0;
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
  device->FreeWorkspace(_ctx, d_temp_storage);
  // 5. Pick the data to result
  IdxType num_output = 0;
  device->CopyDataFromTo(&unique_output_offsets[num_dst_nodes], 0, &num_output, 0,
      sizeof(num_output),
      _ctx, DGLContext{kDLCPU, 0},
      dtype, _stream);
  device->StreamSync(_ctx, _stream);
  IdArray res_src = IdArray::Empty({static_cast<int64_t>(num_output)},
      dtype, _ctx);
  IdArray res_dst = IdArray::Empty({static_cast<int64_t>(num_output)},
      dtype, _ctx);
  IdArray res_cnt = IdArray::Empty({static_cast<int64_t>(num_output)},
      dtype, _ctx);
  CUDA_KERNEL_CALL((_pick_data<IdxType, Idx64Type, BLOCK_SIZE, NODE_TILE_SIZE>),
      nodes_grid, block, 0, _stream,
      d_unique_frequency.Current(), d_unique_src_edges.Current(), num_unique_each_node_alternate,
      dst_data, num_edges_per_node, num_dst_nodes, num_edges,
      unique_output_offsets,
      res_src.Ptr<IdxType>(), res_dst.Ptr<IdxType>(), res_cnt.Ptr<IdxType>());
  device->FreeWorkspace(_ctx, is_first_position);
  device->FreeWorkspace(_ctx, edge_blocks_prefix_data);
  device->FreeWorkspace(_ctx, num_unique_each_node_data);
  device->FreeWorkspace(_ctx, unique_src_edges_data);
  device->FreeWorkspace(_ctx, unique_frequency_data);
  return std::make_tuple(res_src, res_dst, res_cnt);
}
template
class FrequencyHashmap<int64_t>;
template
class FrequencyHashmap<int32_t>;
}; // namespace impl
}; // namespace sampling
}; // namespace dgl
| f3f39a57db42c43b5d04e42de441368f54631303.cu | /*!
* Copyright (c) 2021 by Contributors
* \file graph/sampling/frequency_hashmap.cu
* \brief frequency hashmap - used to select top-k frequency edges of each node
*/
#include <algorithm>
#include <tuple>
#include <utility>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../../array/cuda/atomic.cuh"
#include "../../../array/cuda/dgl_cub.cuh"
#include "frequency_hashmap.cuh"
namespace dgl {
namespace sampling {
namespace impl {
namespace {
int64_t _table_size(const int64_t num, const int64_t scale) {
  /**
   * Calculate the number of buckets in the hashtable. To guarantee we can
   * fill the hashtable in the worst case, we must use a number of buckets which
   * is a power of two.
   * https://en.wikipedia.org/wiki/Quadratic_probing#Limitations
   */
  // Guard small inputs: (num >> 1) == 0 would feed std::log2(0) (== -inf,
  // undefined when converted to an integer).
  if (num <= 1) {
    return int64_t{1} << scale;
  }
  // Use a 64-bit literal: the original `1 << n` shifted a 32-bit int and
  // overflowed for num >= 2^31.
  const int64_t next_pow2 =
      int64_t{1} << static_cast<int64_t>(1 + std::log2(num >> 1));
  return next_pow2 << scale;
}
// Kernel: initialize every hashmap bucket to the empty state
// (src == -1 sentinel, count == 0).
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _init_edge_table(void *edge_hashmap, int64_t edges_len) {
  using EdgeItem = typename DeviceEdgeHashmap<IdxType>::EdgeItem;
  auto edge_hashmap_t = static_cast<EdgeItem*>(edge_hashmap);
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
#pragma unroll(4)
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    if (idx < edges_len) {
      EdgeItem *edge = (edge_hashmap_t + idx);
      edge->src = static_cast<IdxType>(-1);  // -1 marks an unused bucket
      edge->cnt = static_cast<IdxType>(0);
    }
  }
}
// Kernel: insert every edge into the per-destination hashmap; write each
// block's count of newly created unique edges to edge_blocks_prefix and mark
// the creating occurrence in is_first_position.
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _count_frequency(const IdxType *src_data,
    const int64_t num_edges, const int64_t num_edges_per_node,
    IdxType *edge_blocks_prefix, bool *is_first_position,
    DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
  IdxType count = 0;
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    if (idx < num_edges) {
      IdxType src = src_data[idx];
      // -1 entries are padding; skip them
      if (src == static_cast<IdxType>(-1)) {
        continue;
      }
      IdxType dst_idx = (idx / num_edges_per_node);
      // InsertEdge returns the previous count; 0 => this thread created the entry
      if (device_edge_hashmap.InsertEdge(src, dst_idx) == 0) {
        is_first_position[idx] = true;
        ++count;
      }
    }
  }
  // Every thread reaches here, so the block-wide reduction is a valid collective.
  using BlockReduce = typename cub::BlockReduce<IdxType, BLOCK_SIZE>;
  __shared__ typename BlockReduce::TempStorage temp_space;
  count = BlockReduce(temp_space).Sum(count);
  if (threadIdx.x == 0) {
    edge_blocks_prefix[blockIdx.x] = count;
    if (blockIdx.x == 0) {
      // extra slot so a later ExclusiveSum yields the grand total
      edge_blocks_prefix[gridDim.x] = 0;
    }
  }
}
/**
* This structure is used with cub's block-level prefixscan in order to
* keep a running sum as items are iteratively processed.
*/
template <typename T>
struct BlockPrefixCallbackOp {
  T _running_total;  // sum of all tiles scanned so far
  __device__ BlockPrefixCallbackOp(const T running_total)
    : _running_total(running_total) {}
  // Invoked by cub after each tile scan: returns the prefix for this tile and
  // folds the tile's aggregate into the running total.
  __device__ T operator()(const T block_aggregate) {
    const T old_prefix = _running_total;
    _running_total += block_aggregate;
    return old_prefix;
  }
};
// Kernel: compact the hashmap contents into dense arrays. For each edge
// occurrence flagged as "first position", write its src to unique_src_edges
// and its count to unique_frequency at a stable position derived from the
// per-block prefix plus an intra-block exclusive scan. Also records each
// node's unique-edge count the first time that node's segment is visited.
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _compact_frequency(const IdxType *src_data, const IdxType *dst_data,
    const int64_t num_edges, const int64_t num_edges_per_node,
    const IdxType *edge_blocks_prefix, const bool *is_first_position,
    IdxType *num_unique_each_node,
    IdxType *unique_src_edges, Idx64Type *unique_frequency,
    DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
  const IdxType block_offset = edge_blocks_prefix[blockIdx.x];
  using BlockScan = typename cub::BlockScan<IdxType, BLOCK_SIZE>;
  __shared__ typename BlockScan::TempStorage temp_space;
  BlockPrefixCallbackOp<IdxType> prefix_op(0);
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    const bool in_range = (idx < num_edges);
    IdxType flag = 0;
    if (in_range) {
      IdxType dst_idx = (idx / num_edges_per_node);
      if (idx % num_edges_per_node == 0) {
        num_unique_each_node[dst_idx] = device_edge_hashmap.GetDstCount(dst_idx);
      }
      if (is_first_position[idx] == true) {
        flag = 1;
      }
    }
    // BUGFIX: the block-wide scan and barrier are collectives that must be
    // executed by every thread of the block; the original ran them inside the
    // bounds check, which is undefined for a partially out-of-range block.
    BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
    __syncthreads();
    if (in_range && is_first_position[idx] == true) {
      IdxType src = src_data[idx];
      IdxType dst_idx = (idx / num_edges_per_node);
      const IdxType pos = (block_offset + flag);
      unique_src_edges[pos] = src;
      if (sizeof(IdxType) != sizeof(Idx64Type)
          && sizeof(IdxType) == 4) {  // if IdxType is a 32-bit data
        // Pack (reverse node order | count) so a single descending radix sort
        // groups by node first, then orders by frequency within each node.
        unique_frequency[pos] = (
          (static_cast<Idx64Type>(num_edges / num_edges_per_node - dst_idx) << 32)
          | device_edge_hashmap.GetEdgeCount(src, dst_idx));
      } else {
        unique_frequency[pos] = device_edge_hashmap.GetEdgeCount(src, dst_idx);
      }
    }
  }
}
// Kernel: clamp each destination node's unique-edge tally to at most num_pick.
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _get_pick_num(IdxType *num_unique_each_node,
    const int64_t num_pick, const int64_t num_dst_nodes) {
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
#pragma unroll(4)
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    if (idx < num_dst_nodes) {
      IdxType &num_unique = num_unique_each_node[idx];
      num_unique = min(num_unique, static_cast<IdxType>(num_pick));
    }
  }
}
// Kernel: one thread per destination node; gathers that node's top picks from
// the sorted input range [unique_input_offsets[i], unique_input_offsets[i+1])
// into the compacted output range given by unique_output_offsets.
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _pick_data(const Idx64Type *unique_frequency, const IdxType *unique_src_edges,
    const IdxType *unique_input_offsets, const IdxType *dst_data,
    const int64_t num_edges_per_node, const int64_t num_dst_nodes,
    const int64_t num_edges,
    const IdxType *unique_output_offsets,
    IdxType *output_src, IdxType *output_dst, IdxType *output_frequency) {
  int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
  int64_t last_idx = start_idx + TILE_SIZE;
  for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
    if (idx < num_dst_nodes) {
      // dst id is read from the first sampled edge of this node's segment
      const int64_t dst_pos = (idx * num_edges_per_node);
      assert(dst_pos < num_edges);
      const IdxType dst = dst_data[dst_pos];
      const IdxType last_output_offset = unique_output_offsets[idx + 1];
      assert((last_output_offset - unique_output_offsets[idx]) <=
             (unique_input_offsets[idx + 1] - unique_input_offsets[idx]));
      for (IdxType output_idx = unique_output_offsets[idx], input_idx = unique_input_offsets[idx];
           output_idx < last_output_offset; ++output_idx, ++input_idx) {
        output_src[output_idx] = unique_src_edges[input_idx];
        output_dst[output_idx] = dst;
        // for 32-bit ids the high word of unique_frequency holds a node-order
        // key; the narrowing cast keeps only the count
        output_frequency[output_idx] = static_cast<IdxType>(unique_frequency[input_idx]);
      }
    }
  }
}
} // namespace
// Insert (src, dst_idx) into dst_idx's slice of the hashmap and bump its
// counter, resolving collisions with quadratic probing.
// Returns the edge's count *before* this insertion (0 => first occurrence).
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::InsertEdge(
    const IdxType &src, const IdxType &dst_idx) {
  IdxType start_off = dst_idx * _num_items_each_dst;  // this dst's probe region
  IdxType pos = EdgeHash(src);
  IdxType delta = 1;
  IdxType old_cnt = static_cast<IdxType>(-1);
  while (true) {
    // Claim the bucket if empty; AtomicCAS returns the previous src value.
    IdxType old_src = dgl::aten::cuda::AtomicCAS(
      &_edge_hashmap[start_off + pos].src, static_cast<IdxType>(-1), src);
    if (old_src == static_cast<IdxType>(-1) || old_src == src) {
      // first insert
      old_cnt = dgl::aten::cuda::AtomicAdd(
        &_edge_hashmap[start_off + pos].cnt, static_cast<IdxType>(1));
      if (old_src == static_cast<IdxType>(-1)) {
        assert(dst_idx < _num_dst);
        // brand-new unique edge for this destination node
        dgl::aten::cuda::AtomicAdd(&_dst_unique_edges[dst_idx], static_cast<IdxType>(1));
      }
      break;
    }
    pos = EdgeHash(pos + delta);
    delta += 1;
  }
  return old_cnt;
}
// Return the number of unique source edges recorded for destination dst_idx.
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetDstCount(const IdxType &dst_idx) {
  return _dst_unique_edges[dst_idx];
}
// Look up the occurrence count of (src, dst_idx).
// Precondition: the edge was previously inserted — the probe loop does not
// check for empty buckets, so querying an absent edge would never terminate.
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetEdgeCount(
    const IdxType &src, const IdxType &dst_idx) {
  IdxType start_off = dst_idx * _num_items_each_dst;
  IdxType pos = EdgeHash(src);
  IdxType delta = 1;
  while (_edge_hashmap[start_off + pos].src != src) {
    pos = EdgeHash(pos + delta);
    delta += 1;
  }
  return _edge_hashmap[start_off + pos].cnt;
}
template <typename IdxType>
FrequencyHashmap<IdxType>::FrequencyHashmap(
int64_t num_dst, int64_t num_items_each_dst, DGLContext ctx,
cudaStream_t stream, int64_t edge_table_scale) {
_ctx = ctx;
_stream = stream;
num_items_each_dst = _table_size(num_items_each_dst, edge_table_scale);
auto device = dgl::runtime::DeviceAPI::Get(_ctx);
auto dst_unique_edges = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, (num_dst) * sizeof(IdxType)));
auto edge_hashmap = static_cast<EdgeItem*>(
device->AllocWorkspace(_ctx, (num_dst * num_items_each_dst) * sizeof(EdgeItem)));
constexpr int BLOCK_SIZE = 256;
constexpr int TILE_SIZE = BLOCK_SIZE * 8;
dim3 block(BLOCK_SIZE);
dim3 grid((num_dst * num_items_each_dst + TILE_SIZE - 1) / TILE_SIZE);
CUDA_CALL(cudaMemset(dst_unique_edges, 0, (num_dst) * sizeof(IdxType)));
CUDA_KERNEL_CALL((_init_edge_table<IdxType, BLOCK_SIZE, TILE_SIZE>),
grid, block, 0, _stream,
edge_hashmap, (num_dst * num_items_each_dst));
_device_edge_hashmap = new DeviceEdgeHashmap<IdxType>(
num_dst, num_items_each_dst, dst_unique_edges, edge_hashmap);
_dst_unique_edges = dst_unique_edges;
_edge_hashmap = edge_hashmap;
}
template <typename IdxType>
FrequencyHashmap<IdxType>::~FrequencyHashmap() {
  // Tear down the device-side state created by the constructor.
  delete _device_edge_hashmap;
  _device_edge_hashmap = nullptr;
  auto device = dgl::runtime::DeviceAPI::Get(_ctx);
  device->FreeWorkspace(_ctx, _dst_unique_edges);
  device->FreeWorkspace(_ctx, _edge_hashmap);
  _dst_unique_edges = nullptr;
  _edge_hashmap = nullptr;
}
// Selects, for every destination node, the `num_pick` most frequent source
// neighbours among its `num_edges_per_node` sampled edges, returning the
// picked (src, dst, frequency) triples as three IdArrays.
// Pipeline: per-block unique counting -> compaction of unique (src, freq)
// pairs -> descending (segmented) radix sort by frequency -> clamp each
// segment to num_pick -> gather into the result arrays.
template <typename IdxType>
std::tuple<IdArray, IdArray, IdArray> FrequencyHashmap<IdxType>::Topk(
    const IdxType *src_data, const IdxType *dst_data, DLDataType dtype,
    const int64_t num_edges, const int64_t num_edges_per_node,
    const int64_t num_pick) {
  using Idx64Type = int64_t;
  // Edges arrive grouped per destination node in fixed-size segments.
  const int64_t num_dst_nodes = (num_edges / num_edges_per_node);
  constexpr int BLOCK_SIZE = 256;
  // XXX: an empirical value — best performance observed on GV100.
  constexpr int TILE_SIZE = BLOCK_SIZE * 32;
  const dim3 block(BLOCK_SIZE);
  const dim3 edges_grid((num_edges + TILE_SIZE - 1) / TILE_SIZE);
  auto device = dgl::runtime::DeviceAPI::Get(_ctx);
  const IdxType num_edge_blocks = static_cast<IdxType>(edges_grid.x);
  IdxType num_unique_edges = 0;
  // to mark if this position of edges is the first inserting position for _edge_hashmap
  bool *is_first_position = static_cast<bool*>(
      device->AllocWorkspace(_ctx, sizeof(bool) * (num_edges)));
  CUDA_CALL(cudaMemset(is_first_position, 0, sizeof(bool) * (num_edges)));
  // double space to use ExclusiveSum
  auto edge_blocks_prefix_data = static_cast<IdxType*>(
      device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_edge_blocks + 1)));
  IdxType *edge_blocks_prefix = edge_blocks_prefix_data;
  IdxType *edge_blocks_prefix_alternate = (edge_blocks_prefix_data + (num_edge_blocks + 1));
  // triple space to use ExclusiveSum and unique_output_offsets
  auto num_unique_each_node_data = static_cast<IdxType*>(
      device->AllocWorkspace(_ctx, 3 * sizeof(IdxType) * (num_dst_nodes + 1)));
  IdxType *num_unique_each_node = num_unique_each_node_data;
  IdxType *num_unique_each_node_alternate = (num_unique_each_node_data + (num_dst_nodes + 1));
  IdxType *unique_output_offsets = (num_unique_each_node_data + 2 * (num_dst_nodes + 1));
  // 1. Scan the all edges and count the unique edges and unique edges for each dst node
  CUDA_KERNEL_CALL((_count_frequency<IdxType, BLOCK_SIZE, TILE_SIZE>),
      edges_grid, block, 0, _stream,
      src_data, num_edges, num_edges_per_node,
      edge_blocks_prefix, is_first_position, *_device_edge_hashmap);
  // 2. Compact the unique edges frequency
  // 2.1 ExclusiveSum the edge_blocks_prefix
  // CUB convention: first call with a null buffer only queries the temp size.
  void *d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
  device->FreeWorkspace(_ctx, d_temp_storage);
  std::swap(edge_blocks_prefix, edge_blocks_prefix_alternate);
  // The last prefix entry is the total unique-edge count; copy it to host.
  // NOTE(review): `dtype` is forwarded while copying raw IdxType bytes; the
  // explicit byte count governs the copy — confirm the DeviceAPI contract.
  device->CopyDataFromTo(&edge_blocks_prefix[num_edge_blocks], 0, &num_unique_edges, 0,
      sizeof(num_unique_edges),
      _ctx, DGLContext{kDLCPU, 0},
      dtype, _stream);
  device->StreamSync(_ctx, _stream);
  // 2.2 Allocate the data of unique edges and frequency
  // double space to use SegmentedRadixSort
  auto unique_src_edges_data = static_cast<IdxType*>(
      device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_unique_edges)));
  IdxType *unique_src_edges = unique_src_edges_data;
  IdxType *unique_src_edges_alternate = unique_src_edges_data + num_unique_edges;
  // double space to use SegmentedRadixSort
  auto unique_frequency_data = static_cast<Idx64Type*>(
      device->AllocWorkspace(_ctx, 2 * sizeof(Idx64Type) * (num_unique_edges)));
  Idx64Type *unique_frequency = unique_frequency_data;
  Idx64Type *unique_frequency_alternate = unique_frequency_data + num_unique_edges;
  // 2.3 Compact the unique edges and their frequency
  CUDA_KERNEL_CALL((_compact_frequency<IdxType, Idx64Type, BLOCK_SIZE, TILE_SIZE>),
      edges_grid, block, 0, _stream,
      src_data, dst_data, num_edges, num_edges_per_node,
      edge_blocks_prefix, is_first_position, num_unique_each_node,
      unique_src_edges, unique_frequency, *_device_edge_hashmap);
  // 3. SegmentedRadixSort the unique edges and unique_frequency
  // 3.1 ExclusiveSum the num_unique_each_node
  d_temp_storage = nullptr;
  temp_storage_bytes = 0;
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
  device->FreeWorkspace(_ctx, d_temp_storage);
  // 3.2 SegmentedRadixSort the unique_src_edges and unique_frequency
  // Create a set of DoubleBuffers to wrap pairs of device pointers
  cub::DoubleBuffer<Idx64Type> d_unique_frequency(unique_frequency, unique_frequency_alternate);
  cub::DoubleBuffer<IdxType> d_unique_src_edges(unique_src_edges, unique_src_edges_alternate);
  // Determine temporary device storage requirements
  d_temp_storage = nullptr;
  temp_storage_bytes = 0;
  // the DeviceRadixSort is faster than DeviceSegmentedRadixSort,
  // especially when num_dst_nodes is large (about ~10000)
  // NOTE(review): the 32-bit branch sorts globally rather than per segment;
  // this presumably relies on the 64-bit frequency keys encoding the segment
  // — verify how _compact_frequency packs unique_frequency before touching
  // either branch.
  if (dtype.bits == 32) {
    CUDA_CALL(cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges));
  } else {
    CUDA_CALL(cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
        num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
  }
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  if (dtype.bits == 32) {
    CUDA_CALL(cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges));
  } else {
    CUDA_CALL(cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
        d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
        num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
  }
  device->FreeWorkspace(_ctx, d_temp_storage);
  // 4. Get the final pick number for each dst node
  // 4.1 Reset the min(num_pick, num_unique_each_node) to num_unique_each_node
  constexpr int NODE_TILE_SIZE = BLOCK_SIZE * 2;
  const dim3 nodes_grid((num_dst_nodes + NODE_TILE_SIZE - 1) / NODE_TILE_SIZE);
  CUDA_KERNEL_CALL((_get_pick_num<IdxType, BLOCK_SIZE, NODE_TILE_SIZE>),
      nodes_grid, block, 0, _stream,
      num_unique_each_node, num_pick, num_dst_nodes);
  // 4.2 ExclusiveSum the new num_unique_each_node as unique_output_offsets
  // use unique_output_offsets;
  d_temp_storage = nullptr;
  temp_storage_bytes = 0;
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
  d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
      num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
  device->FreeWorkspace(_ctx, d_temp_storage);
  // 5. Pick the data to result
  IdxType num_output = 0;
  device->CopyDataFromTo(&unique_output_offsets[num_dst_nodes], 0, &num_output, 0,
      sizeof(num_output),
      _ctx, DGLContext{kDLCPU, 0},
      dtype, _stream);
  device->StreamSync(_ctx, _stream);
  IdArray res_src = IdArray::Empty({static_cast<int64_t>(num_output)},
      dtype, _ctx);
  IdArray res_dst = IdArray::Empty({static_cast<int64_t>(num_output)},
      dtype, _ctx);
  IdArray res_cnt = IdArray::Empty({static_cast<int64_t>(num_output)},
      dtype, _ctx);
  CUDA_KERNEL_CALL((_pick_data<IdxType, Idx64Type, BLOCK_SIZE, NODE_TILE_SIZE>),
      nodes_grid, block, 0, _stream,
      d_unique_frequency.Current(), d_unique_src_edges.Current(), num_unique_each_node_alternate,
      dst_data, num_edges_per_node, num_dst_nodes, num_edges,
      unique_output_offsets,
      res_src.Ptr<IdxType>(), res_dst.Ptr<IdxType>(), res_cnt.Ptr<IdxType>());
  // Release all scratch buffers; results are owned by the returned IdArrays.
  device->FreeWorkspace(_ctx, is_first_position);
  device->FreeWorkspace(_ctx, edge_blocks_prefix_data);
  device->FreeWorkspace(_ctx, num_unique_each_node_data);
  device->FreeWorkspace(_ctx, unique_src_edges_data);
  device->FreeWorkspace(_ctx, unique_frequency_data);
  return std::make_tuple(res_src, res_dst, res_cnt);
}
// Explicit instantiations for the two index widths supported by the sampler.
template
class FrequencyHashmap<int64_t>;
template
class FrequencyHashmap<int32_t>;
}; // namespace impl
}; // namespace sampling
}; // namespace dgl
|
f54b5825e5158b282af3bcae8da3058fec4bb5ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "yak/kfusion/cuda/device.hpp"
#include "yak/kfusion/cuda/texture_binder.hpp"
namespace kfusion
{
    namespace device
    {
        // Texture references over the previous frame's maps: raw depth
        // (ushort), normals and points. Their filter modes are set and the
        // textures bound per call inside ComputeIcpHelper::operator().
        texture<ushort, 2> dprev_tex;
        texture<Normal, 2> nprev_tex;
        texture<Point, 2> vprev_tex;
// Compile-time launch/reduction configuration for the ICP kernels.
struct ComputeIcpHelper::Policy
{
    enum
    {
        CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
        // 6x6 normal-equation system: 21 upper-triangular entries + 6 RHS
        // terms = 27 reduced values in TOTAL.
        B = 6, COLS = 6, ROWS = 6, DIAG = 6, UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG, TOTAL = UPPER_DIAG_MAT + B,
        FINAL_REDUCE_CTA_SIZE = 256, FINAL_REDUCE_STRIDE = FINAL_REDUCE_CTA_SIZE
    };
};
__kf_device__
float2 ComputeIcpHelper::proj(const float3& p) const
{
    // Pinhole projection onto the image plane: pixel = f * (p.xy / p.z) + c.
    const float xz = __fdividef(p.x, p.z);
    const float yz = __fdividef(p.y, p.z);
    return make_float2(__fmaf_rn(f.x, xz, c.x), __fmaf_rn(f.y, yz, c.y));
}
__kf_device__
float3 ComputeIcpHelper::reproj(float u, float v, float z) const
{
    // Back-project pixel (u, v) at depth z into camera space.
    return make_float3(z * (u - c.x) * finv.x, z * (v - c.y) * finv.y, z);
}
#if defined USE_DEPTH
// Projective data association from the raw depth map. On success returns 0
// and fills s (warped source point), d (target point) and nd (target normal);
// each non-zero return code identifies the rejection stage.
__kf_device__
int ComputeIcpHelper::find_coresp(int x, int y, float3& nd, float3& d, float3& s) const
{
    int src_z = dcurr(y, x);
    if (src_z == 0)
        return 40;  // no source depth at this pixel
    // Scale raw depth by 0.001 (presumably mm -> m) and warp into target frame.
    s = aff * reproj(x, y, src_z * 0.001f);
    float2 coo = proj(s);
    if (s.z <= 0 || coo.x < 0 || coo.y < 0 || coo.x >= cols || coo.y >= rows)
        return 80;  // projects behind camera or outside the target image
    int dst_z = tex2D(dprev_tex, coo.x, coo.y);
    if (dst_z == 0)
        return 120;  // no target depth at projected location
    d = reproj(coo.x, coo.y, dst_z * 0.001f);
    float dist2 = norm_sqr(s - d);
    if (dist2 > dist2_thres)
        return 160;  // points too far apart
    float3 ns = aff.R * tr(ncurr(y, x));
    nd = tr(tex2D(nprev_tex, coo.x, coo.y));
    float cosine = fabs(dot(ns, nd));
    if (cosine < min_cosine)
        return 200;  // normals disagree
    return 0;
}
#else
// Projective data association from the precomputed point map (NaN marks an
// invalid point). Same return-code convention as the depth variant above.
__kf_device__
int ComputeIcpHelper::find_coresp(int x, int y, float3& nd, float3& d, float3& s) const
{
    s = tr(vcurr(y, x));
    if (isnan(s.x))
        return 40;  // invalid source point
    s = aff * s;
    float2 coo = proj(s);
    if (s.z <= 0 || coo.x < 0 || coo.y < 0 || coo.x >= cols || coo.y >= rows)
        return 80;  // projects behind camera or outside the target image
    d = tr(tex2D(vprev_tex, coo.x, coo.y));
    if (isnan(d.x))
        return 120;  // invalid target point
    float dist2 = norm_sqr(s - d);
    if (dist2 > dist2_thres)
        return 160;  // points too far apart
    float3 ns = aff.R * tr(ncurr(y, x));
    nd = tr(tex2D(nprev_tex, coo.x, coo.y));
    float cosine = fabs(dot(ns, nd));
    if (cosine < min_cosine)
        return 200;  // normals disagree
    return 0;
}
#endif
__kf_device__
void ComputeIcpHelper::partial_reduce(const float row[7], PtrStep<float>& partial_buf) const
{
    // Folds this thread's row = [J | r] contribution into per-block partial
    // sums. The original unrolled 27 identical reduce-and-store sequences:
    // the products row[i] * row[j] for 0 <= i <= 5, i <= j <= 6 (the upper
    // triangle of the 6x6 system plus the 6 right-hand-side terms), emitted
    // in row-major order down one column of partial_buf. The loops below
    // perform exactly that sequence.
    volatile __shared__ float smem[Policy::CTA_SIZE];
    const int tid = Block::flattenedThreadId();
    float *out = partial_buf.data + blockIdx.x + gridDim.x * blockIdx.y;
    const size_t out_stride = partial_buf.step / sizeof(float);
    #pragma unroll
    for (int i = 0; i < 6; ++i)
    {
        #pragma unroll
        for (int j = i; j < 7; ++j)
        {
            __syncthreads();            // smem[0] from previous pass is read below
            smem[tid] = row[i] * row[j];
            __syncthreads();
            Block::reduce<Policy::CTA_SIZE>(smem, plus());
            if (tid == 0)
            {
                *out = smem[0];
                out += out_stride;
            }
        }
    }
}
// One thread per pixel: find a correspondence and build its point-to-plane
// row [ s x n | n | n.(d - s) ], then fold per-block partial sums into
// partial_buf.
__global__ void icp_helper_kernel(const ComputeIcpHelper helper, PtrStep<float> partial_buf)
{
    int x = threadIdx.x + blockIdx.x * ComputeIcpHelper::Policy::CTA_SIZE_X;
    int y = threadIdx.y + blockIdx.y * ComputeIcpHelper::Policy::CTA_SIZE_Y;
    float3 n, d, s;
    // Out-of-range threads are marked filtered but still contribute zeros to
    // the block reduction below, which every thread must participate in.
    int filtered = (x < helper.cols && y < helper.rows) ? helper.find_coresp(x, y, n, d, s) : 1;
    //if (x < helper.cols && y < helper.rows) mask(y, x) = filtered;
    float row[7];
    if (!filtered)
    {
        // Pack the 3-vectors directly into the row array.
        *(float3*) &row[0] = cross(s, n);
        *(float3*) &row[3] = n;
        row[6] = dot(n, d - s);
    }
    else
        row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f;
    helper.partial_reduce(row, partial_buf);
}
// One block per reduced value: sums row blockIdx.x of the partial buffer.
__global__ void icp_final_reduce_kernel(const PtrStep<float> partial_buf, const int length, float* final_buf)
{
    const int tid = threadIdx.x;
    const float *src_row = partial_buf.ptr(blockIdx.x);
    // Strided per-thread accumulation over the row.
    float acc = 0.f;
    for (int i = tid; i < length; i += ComputeIcpHelper::Policy::FINAL_REDUCE_STRIDE)
        acc += src_row[i];
    __shared__ float smem[ComputeIcpHelper::Policy::FINAL_REDUCE_CTA_SIZE];
    smem[tid] = acc;
    __syncthreads();
    Block::reduce<ComputeIcpHelper::Policy::FINAL_REDUCE_CTA_SIZE>(smem, plus());
    if (tid == 0)
        final_buf[blockIdx.x] = smem[0];
}
}
}
// Depth-map variant of one ICP reduction step: binds the previous depth and
// normal textures, launches the per-pixel correspondence kernel, and reduces
// the 27 partial sums into `data`. The final copy is asynchronous on stream
// s — the caller must synchronize before reading `data`.
void kfusion::device::ComputeIcpHelper::operator()(const Depth& dprev, const Normals& nprev, DeviceArray2D<float>& buffer, float* data, hipStream_t s)
{
    dprev_tex.filterMode = hipFilterModePoint;
    nprev_tex.filterMode = hipFilterModePoint;
    TextureBinder dprev_binder(dprev, dprev_tex);
    TextureBinder nprev_binder(nprev, nprev_tex);
    dim3 block(Policy::CTA_SIZE_X, Policy::CTA_SIZE_Y);
    dim3 grid(divUp((int) cols, block.x), divUp((int) rows, block.y));
    int partials_count = (int) (grid.x * grid.y);
    allocate_buffer(buffer, partials_count);
    hipLaunchKernelGGL(( icp_helper_kernel), dim3(grid), dim3(block), 0, s, *this, buffer);
    cudaSafeCall(hipGetLastError());
    int b = Policy::FINAL_REDUCE_CTA_SIZE;
    int g = Policy::TOTAL;  // one block per reduced output value
    hipLaunchKernelGGL(( icp_final_reduce_kernel), dim3(g), dim3(b), 0, s, buffer, partials_count, buffer.ptr(Policy::TOTAL));
    cudaSafeCall(hipGetLastError());
    // Final values live in row Policy::TOTAL of the buffer.
    cudaSafeCall(hipMemcpyAsync(data, buffer.ptr(Policy::TOTAL), Policy::TOTAL * sizeof(float), hipMemcpyDeviceToHost, s));
    cudaSafeCall(hipGetLastError());
}
// Point-map variant of one ICP reduction step; same flow as the Depth
// overload above, but samples vprev_tex instead of dprev_tex. Results land
// in `data` asynchronously on stream s.
void kfusion::device::ComputeIcpHelper::operator()(const Points& vprev, const Normals& nprev, DeviceArray2D<float>& buffer, float* data, hipStream_t s)
{
    // Bug fix: this overload binds and samples vprev_tex (see the
    // non-USE_DEPTH find_coresp), but the original configured
    // dprev_tex.filterMode instead — a copy-paste from the Depth overload
    // that left vprev_tex's filter mode unset. Configure the textures that
    // are actually bound here.
    vprev_tex.filterMode = hipFilterModePoint;
    nprev_tex.filterMode = hipFilterModePoint;
    TextureBinder vprev_binder(vprev, vprev_tex);
    TextureBinder nprev_binder(nprev, nprev_tex);
    dim3 block(Policy::CTA_SIZE_X, Policy::CTA_SIZE_Y);
    dim3 grid(divUp((int) cols, block.x), divUp((int) rows, block.y));
    int partials_count = (int) (grid.x * grid.y);
    allocate_buffer(buffer, partials_count);
    hipLaunchKernelGGL(( icp_helper_kernel), dim3(grid), dim3(block), 0, s, *this, buffer);
    cudaSafeCall(hipGetLastError());
    int b = Policy::FINAL_REDUCE_CTA_SIZE;
    int g = Policy::TOTAL;  // one block per reduced output value
    hipLaunchKernelGGL(( icp_final_reduce_kernel), dim3(g), dim3(b), 0, s, buffer, partials_count, buffer.ptr(Policy::TOTAL));
    cudaSafeCall(hipGetLastError());
    cudaSafeCall(hipMemcpyAsync(data, buffer.ptr(Policy::TOTAL), Policy::TOTAL * sizeof(float), hipMemcpyDeviceToHost, s));
    cudaSafeCall(hipGetLastError());
}
// Ensures `buffer` can hold the per-block partial sums (one row per reduced
// value, plus row Policy::TOTAL for the final reduced values used in
// operator()).
void kfusion::device::ComputeIcpHelper::allocate_buffer(DeviceArray2D<float>& buffer, int partials_count)
{
    // A negative count means "size for the default 640x480 input".
    if (partials_count < 0)
    {
        const int default_cols = 640;
        const int default_rows = 480;
        partials_count = divUp(default_cols, Policy::CTA_SIZE_X) * divUp(default_rows, Policy::CTA_SIZE_Y);
    }
    const int needed_rows = Policy::TOTAL + 1;
    const int needed_cols = max(partials_count, Policy::TOTAL);
    if (buffer.rows() < needed_rows || buffer.cols() < needed_cols)
        buffer.create(needed_rows, needed_cols);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// ComputeIcpHelper::PageLockHelper
// Owns a page-locked (pinned) host buffer of Policy::TOTAL floats — allocated
// in the constructor, released in the destructor. Presumably supplied as the
// `data` destination for the async device-to-host copy in operator(); confirm
// at the call sites.
kfusion::device::ComputeIcpHelper::PageLockHelper::PageLockHelper() :
        data(0)
{
    cudaSafeCall(hipHostMalloc((void ** )&data, Policy::TOTAL * sizeof(float)));
}
kfusion::device::ComputeIcpHelper::PageLockHelper::~PageLockHelper()
{
    cudaSafeCall(hipHostFree(data));
    data = 0;
}
| f54b5825e5158b282af3bcae8da3058fec4bb5ac.cu | #include "yak/kfusion/cuda/device.hpp"
#include "yak/kfusion/cuda/texture_binder.hpp"
namespace kfusion
{
    namespace device
    {
        // Texture references over the previous frame's maps: raw depth
        // (ushort), normals and points. Their filter modes are set and the
        // textures bound per call inside ComputeIcpHelper::operator().
        texture<ushort, 2> dprev_tex;
        texture<Normal, 2> nprev_tex;
        texture<Point, 2> vprev_tex;
// Compile-time launch/reduction configuration for the ICP kernels.
struct ComputeIcpHelper::Policy
{
    enum
    {
        CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
        // 6x6 normal-equation system: 21 upper-triangular entries + 6 RHS
        // terms = 27 reduced values in TOTAL.
        B = 6, COLS = 6, ROWS = 6, DIAG = 6, UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG, TOTAL = UPPER_DIAG_MAT + B,
        FINAL_REDUCE_CTA_SIZE = 256, FINAL_REDUCE_STRIDE = FINAL_REDUCE_CTA_SIZE
    };
};
__kf_device__
float2 ComputeIcpHelper::proj(const float3& p) const
{
    // Pinhole projection onto the image plane: pixel = f * (p.xy / p.z) + c.
    const float xz = __fdividef(p.x, p.z);
    const float yz = __fdividef(p.y, p.z);
    return make_float2(__fmaf_rn(f.x, xz, c.x), __fmaf_rn(f.y, yz, c.y));
}
__kf_device__
float3 ComputeIcpHelper::reproj(float u, float v, float z) const
{
    // Back-project pixel (u, v) at depth z into camera space.
    return make_float3(z * (u - c.x) * finv.x, z * (v - c.y) * finv.y, z);
}
#if defined USE_DEPTH
// Projective data association from the raw depth map. On success returns 0
// and fills s (warped source point), d (target point) and nd (target normal);
// each non-zero return code identifies the rejection stage.
__kf_device__
int ComputeIcpHelper::find_coresp(int x, int y, float3& nd, float3& d, float3& s) const
{
    int src_z = dcurr(y, x);
    if (src_z == 0)
        return 40;  // no source depth at this pixel
    // Scale raw depth by 0.001 (presumably mm -> m) and warp into target frame.
    s = aff * reproj(x, y, src_z * 0.001f);
    float2 coo = proj(s);
    if (s.z <= 0 || coo.x < 0 || coo.y < 0 || coo.x >= cols || coo.y >= rows)
        return 80;  // projects behind camera or outside the target image
    int dst_z = tex2D(dprev_tex, coo.x, coo.y);
    if (dst_z == 0)
        return 120;  // no target depth at projected location
    d = reproj(coo.x, coo.y, dst_z * 0.001f);
    float dist2 = norm_sqr(s - d);
    if (dist2 > dist2_thres)
        return 160;  // points too far apart
    float3 ns = aff.R * tr(ncurr(y, x));
    nd = tr(tex2D(nprev_tex, coo.x, coo.y));
    float cosine = fabs(dot(ns, nd));
    if (cosine < min_cosine)
        return 200;  // normals disagree
    return 0;
}
#else
// Projective data association from the precomputed point map (NaN marks an
// invalid point). Same return-code convention as the depth variant above.
__kf_device__
int ComputeIcpHelper::find_coresp(int x, int y, float3& nd, float3& d, float3& s) const
{
    s = tr(vcurr(y, x));
    if (isnan(s.x))
        return 40;  // invalid source point
    s = aff * s;
    float2 coo = proj(s);
    if (s.z <= 0 || coo.x < 0 || coo.y < 0 || coo.x >= cols || coo.y >= rows)
        return 80;  // projects behind camera or outside the target image
    d = tr(tex2D(vprev_tex, coo.x, coo.y));
    if (isnan(d.x))
        return 120;  // invalid target point
    float dist2 = norm_sqr(s - d);
    if (dist2 > dist2_thres)
        return 160;  // points too far apart
    float3 ns = aff.R * tr(ncurr(y, x));
    nd = tr(tex2D(nprev_tex, coo.x, coo.y));
    float cosine = fabs(dot(ns, nd));
    if (cosine < min_cosine)
        return 200;  // normals disagree
    return 0;
}
#endif
__kf_device__
void ComputeIcpHelper::partial_reduce(const float row[7], PtrStep<float>& partial_buf) const
{
    // Folds this thread's row = [J | r] contribution into per-block partial
    // sums. The original unrolled 27 identical reduce-and-store sequences:
    // the products row[i] * row[j] for 0 <= i <= 5, i <= j <= 6 (the upper
    // triangle of the 6x6 system plus the 6 right-hand-side terms), emitted
    // in row-major order down one column of partial_buf. The loops below
    // perform exactly that sequence.
    volatile __shared__ float smem[Policy::CTA_SIZE];
    const int tid = Block::flattenedThreadId();
    float *out = partial_buf.data + blockIdx.x + gridDim.x * blockIdx.y;
    const size_t out_stride = partial_buf.step / sizeof(float);
    #pragma unroll
    for (int i = 0; i < 6; ++i)
    {
        #pragma unroll
        for (int j = i; j < 7; ++j)
        {
            __syncthreads();            // smem[0] from previous pass is read below
            smem[tid] = row[i] * row[j];
            __syncthreads();
            Block::reduce<Policy::CTA_SIZE>(smem, plus());
            if (tid == 0)
            {
                *out = smem[0];
                out += out_stride;
            }
        }
    }
}
// One thread per pixel: find a correspondence and build its point-to-plane
// row [ s x n | n | n.(d - s) ], then fold per-block partial sums into
// partial_buf.
__global__ void icp_helper_kernel(const ComputeIcpHelper helper, PtrStep<float> partial_buf)
{
    int x = threadIdx.x + blockIdx.x * ComputeIcpHelper::Policy::CTA_SIZE_X;
    int y = threadIdx.y + blockIdx.y * ComputeIcpHelper::Policy::CTA_SIZE_Y;
    float3 n, d, s;
    // Out-of-range threads are marked filtered but still contribute zeros to
    // the block reduction below, which every thread must participate in.
    int filtered = (x < helper.cols && y < helper.rows) ? helper.find_coresp(x, y, n, d, s) : 1;
    //if (x < helper.cols && y < helper.rows) mask(y, x) = filtered;
    float row[7];
    if (!filtered)
    {
        // Pack the 3-vectors directly into the row array.
        *(float3*) &row[0] = cross(s, n);
        *(float3*) &row[3] = n;
        row[6] = dot(n, d - s);
    }
    else
        row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f;
    helper.partial_reduce(row, partial_buf);
}
// One block per reduced value: sums row blockIdx.x of the partial buffer.
__global__ void icp_final_reduce_kernel(const PtrStep<float> partial_buf, const int length, float* final_buf)
{
    const int tid = threadIdx.x;
    const float *src_row = partial_buf.ptr(blockIdx.x);
    // Strided per-thread accumulation over the row.
    float acc = 0.f;
    for (int i = tid; i < length; i += ComputeIcpHelper::Policy::FINAL_REDUCE_STRIDE)
        acc += src_row[i];
    __shared__ float smem[ComputeIcpHelper::Policy::FINAL_REDUCE_CTA_SIZE];
    smem[tid] = acc;
    __syncthreads();
    Block::reduce<ComputeIcpHelper::Policy::FINAL_REDUCE_CTA_SIZE>(smem, plus());
    if (tid == 0)
        final_buf[blockIdx.x] = smem[0];
}
}
}
// Depth-map variant of one ICP reduction step: binds the previous depth and
// normal textures, launches the per-pixel correspondence kernel, and reduces
// the 27 partial sums into `data`. The final copy is asynchronous on stream
// s — the caller must synchronize before reading `data`.
void kfusion::device::ComputeIcpHelper::operator()(const Depth& dprev, const Normals& nprev, DeviceArray2D<float>& buffer, float* data, cudaStream_t s)
{
    dprev_tex.filterMode = cudaFilterModePoint;
    nprev_tex.filterMode = cudaFilterModePoint;
    TextureBinder dprev_binder(dprev, dprev_tex);
    TextureBinder nprev_binder(nprev, nprev_tex);
    dim3 block(Policy::CTA_SIZE_X, Policy::CTA_SIZE_Y);
    dim3 grid(divUp((int) cols, block.x), divUp((int) rows, block.y));
    int partials_count = (int) (grid.x * grid.y);
    allocate_buffer(buffer, partials_count);
    icp_helper_kernel<<<grid, block, 0, s>>>(*this, buffer);
    cudaSafeCall(cudaGetLastError());
    int b = Policy::FINAL_REDUCE_CTA_SIZE;
    int g = Policy::TOTAL;  // one block per reduced output value
    icp_final_reduce_kernel<<<g, b, 0, s>>>(buffer, partials_count, buffer.ptr(Policy::TOTAL));
    cudaSafeCall(cudaGetLastError());
    // Final values live in row Policy::TOTAL of the buffer.
    cudaSafeCall(cudaMemcpyAsync(data, buffer.ptr(Policy::TOTAL), Policy::TOTAL * sizeof(float), cudaMemcpyDeviceToHost, s));
    cudaSafeCall(cudaGetLastError());
}
// Point-map variant of one ICP reduction step; same flow as the Depth
// overload above, but samples vprev_tex instead of dprev_tex. Results land
// in `data` asynchronously on stream s.
void kfusion::device::ComputeIcpHelper::operator()(const Points& vprev, const Normals& nprev, DeviceArray2D<float>& buffer, float* data, cudaStream_t s)
{
    // Bug fix: this overload binds and samples vprev_tex (see the
    // non-USE_DEPTH find_coresp), but the original configured
    // dprev_tex.filterMode instead — a copy-paste from the Depth overload
    // that left vprev_tex's filter mode unset. Configure the textures that
    // are actually bound here.
    vprev_tex.filterMode = cudaFilterModePoint;
    nprev_tex.filterMode = cudaFilterModePoint;
    TextureBinder vprev_binder(vprev, vprev_tex);
    TextureBinder nprev_binder(nprev, nprev_tex);
    dim3 block(Policy::CTA_SIZE_X, Policy::CTA_SIZE_Y);
    dim3 grid(divUp((int) cols, block.x), divUp((int) rows, block.y));
    int partials_count = (int) (grid.x * grid.y);
    allocate_buffer(buffer, partials_count);
    icp_helper_kernel<<<grid, block, 0, s>>>(*this, buffer);
    cudaSafeCall(cudaGetLastError());
    int b = Policy::FINAL_REDUCE_CTA_SIZE;
    int g = Policy::TOTAL;  // one block per reduced output value
    icp_final_reduce_kernel<<<g, b, 0, s>>>(buffer, partials_count, buffer.ptr(Policy::TOTAL));
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaMemcpyAsync(data, buffer.ptr(Policy::TOTAL), Policy::TOTAL * sizeof(float), cudaMemcpyDeviceToHost, s));
    cudaSafeCall(cudaGetLastError());
}
// Ensures `buffer` can hold the per-block partial sums (one row per reduced
// value, plus row Policy::TOTAL for the final reduced values used in
// operator()).
void kfusion::device::ComputeIcpHelper::allocate_buffer(DeviceArray2D<float>& buffer, int partials_count)
{
    // A negative count means "size for the default 640x480 input".
    if (partials_count < 0)
    {
        const int default_cols = 640;
        const int default_rows = 480;
        partials_count = divUp(default_cols, Policy::CTA_SIZE_X) * divUp(default_rows, Policy::CTA_SIZE_Y);
    }
    const int needed_rows = Policy::TOTAL + 1;
    const int needed_cols = max(partials_count, Policy::TOTAL);
    if (buffer.rows() < needed_rows || buffer.cols() < needed_cols)
        buffer.create(needed_rows, needed_cols);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// ComputeIcpHelper::PageLockHelper
// Owns a page-locked (pinned) host buffer of Policy::TOTAL floats — allocated
// in the constructor, released in the destructor. Presumably supplied as the
// `data` destination for the async device-to-host copy in operator(); confirm
// at the call sites.
kfusion::device::ComputeIcpHelper::PageLockHelper::PageLockHelper() :
        data(0)
{
    cudaSafeCall(cudaMallocHost((void ** )&data, Policy::TOTAL * sizeof(float)));
}
kfusion::device::ComputeIcpHelper::PageLockHelper::~PageLockHelper()
{
    cudaSafeCall(cudaFreeHost(data));
    data = 0;
}
|
5335a7a75603e8570fff271ef55718fdb3fdc85c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  // Converts one RGBA pixel to 8-bit greyscale using the NTSC luma formula
  //   I = .299f * R + .587f * G + .114f * B   (alpha ignored).
  // Expects a 1D launch covering at least numRows * numCols threads.
  // Fixes over the original:
  //  * bounds guard — launches that over-cover the image no longer write
  //    past the end of the buffers;
  //  * single-precision literals (0.299f etc.) as the assignment text above
  //    explicitly requires, instead of double-precision 0.299.
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < numRows * numCols) {
    const uchar4 px = rgbaImage[idx];
    greyImage[idx] = 0.299f * px.x + 0.587f * px.y + 0.114f * px.z;
  }
}
// Launches the greyscale conversion over the whole image and blocks until
// it completes.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  // NOTE(review): numCols blocks of numRows threads covers exactly
  // numRows * numCols pixels, but any image taller than the 1024
  // threads-per-block limit will fail to launch — confirm expected input
  // sizes, or switch to a fixed block size with a ceil-div grid (the kernel
  // would then also need an in-bounds guard).
  const dim3 blockSize(numRows, 1, 1);
  const dim3 gridSize( numCols, 1, 1);
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 5335a7a75603e8570fff271ef55718fdb3fdc85c.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int idx = blockDim.x * blockIdx.x + threadIdx.x;
greyImage[idx] = (0.299 * rgbaImage[idx].x) + (0.587 * rgbaImage[idx].y) + (0.114 * rgbaImage[idx].z);
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numRows, 1, 1);
const dim3 gridSize( numCols, 1, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
d7d147b648fb678dd2d2b33be14cef66bb5252d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
// #include "caffe2/operators/top_k_heap_selection.cuh"
#include "caffe2/operators/top_k_radix_selection.cuh"
#include "caffe2/utils/math.h"
#include "select_bottom_n_single_op.h"
namespace caffe2 {
namespace {
template <typename TIndex>
__global__ void SetIndex(const int nthreads,
TIndex* output) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// just set the index
output[index] = static_cast<TIndex>(index);
}
}
// Does not work when K is larger than 512
// template <typename T, int kHeapSize, bool kSelectMax = false>
// void RunHeapSelectionImpl(
// const T* input,
// const TIndex outer_size,
// const TIndex inner_size,
// const int k,
// T* values,
// TIndex* indices,
// CUDAContext* context) {
// constexpr int kBlockSize = 256;
// constexpr int kNumWarps = kBlockSize / kWarpSize;
// constexpr int smem = kNumWarps * kHeapSize * (sizeof(T) + sizeof(TIndex));
// constexpr T kInitVal = kSelectMax ? std::numeric_limits<T>::lowest()
// : std::numeric_limits<T>::max();
// selectRowsViaHeap<T, TIndex, TIndex, kBlockSize, kHeapSize, kSelectMax>
// <<<outer_size, kBlockSize, smem, context->cuda_stream()>>>(
// input,
// values,
// indices,
// kInitVal,
// std::numeric_limits<TIndex>::max(),
// outer_size,
// inner_size,
// k);
// }
// Stupid that it only works when selecting the Bottom K
template <typename T, bool kSelectMax = false>
void RunRadixSelectionImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
const int block = ::min(
math::roundUp(static_cast<int>(inner_size), kWarpSize), CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( gatherTopK<T, kSelectMax, TIndex>)
, dim3(outer_size), dim3(block), 0, context->cuda_stream(),
input, inner_size, k, outer_size, values, indices);
}
} // namespace
template<>
bool SelectBottomNSingleOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data
DCHECK_EQ(X.ndim(), 4);
const int num_images = X.dim32(0);
DCHECK_LT(im_, num_images);
const int num_probs = X.dim32(1) * X.dim32(2) * X.dim32(3);
const float* Xp = X.data<float>() + im_ * num_probs;
auto* Yi = Output(0);
auto* Yv = Output(1);
if (num_probs <= top_n_) {
// just select everything
Yi->Resize(num_probs);
Yv->Resize(num_probs);
hipLaunchKernelGGL(( SetIndex<TIndex>), dim3(CAFFE_GET_BLOCKS(num_probs)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), num_probs,
Yi->mutable_data<TIndex>());
context_.Copy<float, CUDAContext, CUDAContext>(num_probs, Xp,
Yv->mutable_data<float>());
return true;
}
Yi->Resize(top_n_);
Yv->Resize(top_n_);
// do the top_k selection thing, heap sort seems not working
// RunHeapSelectionImpl<float, 1024>(Xp,
// 1,
// num_probs,
// top_n_,
// Yv->mutable_data<float>(),
// Yi->mutable_data<TIndex>(),
// &context_);
RunRadixSelectionImpl<float>(Xp,
1,
num_probs,
top_n_,
Yv->mutable_data<float>(),
Yi->mutable_data<TIndex>(),
&context_);
return true;
}
REGISTER_CUDA_OPERATOR(SelectBottomNSingle,
SelectBottomNSingleOp<float, CUDAContext>);
} // namespace caffe2 | d7d147b648fb678dd2d2b33be14cef66bb5252d0.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
// #include "caffe2/operators/top_k_heap_selection.cuh"
#include "caffe2/operators/top_k_radix_selection.cuh"
#include "caffe2/utils/math.h"
#include "select_bottom_n_single_op.h"
namespace caffe2 {
namespace {
template <typename TIndex>
__global__ void SetIndex(const int nthreads,
TIndex* output) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// just set the index
output[index] = static_cast<TIndex>(index);
}
}
// Does not work when K is larger than 512
// template <typename T, int kHeapSize, bool kSelectMax = false>
// void RunHeapSelectionImpl(
// const T* input,
// const TIndex outer_size,
// const TIndex inner_size,
// const int k,
// T* values,
// TIndex* indices,
// CUDAContext* context) {
// constexpr int kBlockSize = 256;
// constexpr int kNumWarps = kBlockSize / kWarpSize;
// constexpr int smem = kNumWarps * kHeapSize * (sizeof(T) + sizeof(TIndex));
// constexpr T kInitVal = kSelectMax ? std::numeric_limits<T>::lowest()
// : std::numeric_limits<T>::max();
// selectRowsViaHeap<T, TIndex, TIndex, kBlockSize, kHeapSize, kSelectMax>
// <<<outer_size, kBlockSize, smem, context->cuda_stream()>>>(
// input,
// values,
// indices,
// kInitVal,
// std::numeric_limits<TIndex>::max(),
// outer_size,
// inner_size,
// k);
// }
// Stupid that it only works when selecting the Bottom K
template <typename T, bool kSelectMax = false>
void RunRadixSelectionImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
const int block = std::min(
math::roundUp(static_cast<int>(inner_size), kWarpSize), CAFFE_CUDA_NUM_THREADS);
gatherTopK<T, kSelectMax, TIndex>
<<<outer_size, block, 0, context->cuda_stream()>>>(
input, inner_size, k, outer_size, values, indices);
}
} // namespace
template<>
bool SelectBottomNSingleOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data
DCHECK_EQ(X.ndim(), 4);
const int num_images = X.dim32(0);
DCHECK_LT(im_, num_images);
const int num_probs = X.dim32(1) * X.dim32(2) * X.dim32(3);
const float* Xp = X.data<float>() + im_ * num_probs;
auto* Yi = Output(0);
auto* Yv = Output(1);
if (num_probs <= top_n_) {
// just select everything
Yi->Resize(num_probs);
Yv->Resize(num_probs);
SetIndex<TIndex><<<CAFFE_GET_BLOCKS(num_probs), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(num_probs,
Yi->mutable_data<TIndex>());
context_.Copy<float, CUDAContext, CUDAContext>(num_probs, Xp,
Yv->mutable_data<float>());
return true;
}
Yi->Resize(top_n_);
Yv->Resize(top_n_);
// do the top_k selection thing, heap sort seems not working
// RunHeapSelectionImpl<float, 1024>(Xp,
// 1,
// num_probs,
// top_n_,
// Yv->mutable_data<float>(),
// Yi->mutable_data<TIndex>(),
// &context_);
RunRadixSelectionImpl<float>(Xp,
1,
num_probs,
top_n_,
Yv->mutable_data<float>(),
Yi->mutable_data<TIndex>(),
&context_);
return true;
}
REGISTER_CUDA_OPERATOR(SelectBottomNSingle,
SelectBottomNSingleOp<float, CUDAContext>);
} // namespace caffe2 |
fdfc1eb819f2d8a9a1d2c0aa5fae2ad5131d8f25.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "main.h"
#define SIZE 20
int main(void) {
int *da, *db, *dc, *ha, *hb, *hc;
hipMalloc(&da, SIZE * sizeof(int));
hipMalloc(&db, SIZE * sizeof(int));
hipMalloc(&dc, SIZE * sizeof(int));
ha = (int*)malloc(SIZE * sizeof(int));
hb = (int*)malloc(SIZE * sizeof(int));
hc = (int*)malloc(SIZE * sizeof(int));
for (int i = 0; i < SIZE; i ++) {
ha[i] = i;
hb[i] = i;
hc[i] = 0;
}
hipMemcpy(da, ha, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(db, hb, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(hc, dc, SIZE * sizeof(int), hipMemcpyDeviceToHost);
hipFree(da);
hipFree(db);
hipFree(dc);
free(ha);
free(hb);
free(hc);
Timer timer;
timer.start(L"Reading from file ...");
unsigned int numlines = 0;
Textfile wordlist(L"input.txt");
unsigned int numberstrings = wordlist.countlines();
timer.stop();
timer.start(L"Sorting with Burstsort");
Burstsort<Minuscule> bs;
for (unsigned int i = 0; i < numberstrings; i++)
bs.insert((wchar_t*)wordlist.getline(i));
bs.sort();
timer.stop();
bs.print(true, false);
bs.clear();
wordlist.close();
return 0;
} | fdfc1eb819f2d8a9a1d2c0aa5fae2ad5131d8f25.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "main.h"
#define SIZE 20
int main(void) {
int *da, *db, *dc, *ha, *hb, *hc;
cudaMalloc(&da, SIZE * sizeof(int));
cudaMalloc(&db, SIZE * sizeof(int));
cudaMalloc(&dc, SIZE * sizeof(int));
ha = (int*)malloc(SIZE * sizeof(int));
hb = (int*)malloc(SIZE * sizeof(int));
hc = (int*)malloc(SIZE * sizeof(int));
for (int i = 0; i < SIZE; i ++) {
ha[i] = i;
hb[i] = i;
hc[i] = 0;
}
cudaMemcpy(da, ha, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(db, hb, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(hc, dc, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(ha);
free(hb);
free(hc);
Timer timer;
timer.start(L"Reading from file ...");
unsigned int numlines = 0;
Textfile wordlist(L"input.txt");
unsigned int numberstrings = wordlist.countlines();
timer.stop();
timer.start(L"Sorting with Burstsort");
Burstsort<Minuscule> bs;
for (unsigned int i = 0; i < numberstrings; i++)
bs.insert((wchar_t*)wordlist.getline(i));
bs.sort();
timer.stop();
bs.print(true, false);
bs.clear();
wordlist.close();
return 0;
} |
2155d3956c4f6360e5764628f39ea60b379b6d1f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hipfft.h>
#include <omp.h>
#include "cuda_help.h"
#include "gpu_comms.h"
void init_GPU_peer(int p, int tid, hipStream_t *streams){
// enable peer to peer.
for (int dd = 0; dd < p; dd++) {
int access = 0;
hipDeviceCanAccessPeer(&access, tid, dd);
if (access){
hipDeviceEnablePeerAccess(dd, 0);
cudaCheckError();
}
}
for (int s = 0; s < NUM_STREAMS; s++) {
hipStreamCreateWithFlags(&streams[s], hipStreamNonBlocking);
cudaCheckError();
}
}
void finalize_GPU_peer(hipStream_t *streams){
for (int s = 0; s < NUM_STREAMS; s++) {
hipStreamDestroy(streams[s]);
cudaCheckError();
}
}
| 2155d3956c4f6360e5764628f39ea60b379b6d1f.cu | #include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#include <omp.h>
#include "cuda_help.h"
#include "gpu_comms.h"
void init_GPU_peer(int p, int tid, cudaStream_t *streams){
// enable peer to peer.
for (int dd = 0; dd < p; dd++) {
int access = 0;
cudaDeviceCanAccessPeer(&access, tid, dd);
if (access){
cudaDeviceEnablePeerAccess(dd, 0);
cudaCheckError();
}
}
for (int s = 0; s < NUM_STREAMS; s++) {
cudaStreamCreateWithFlags(&streams[s], cudaStreamNonBlocking);
cudaCheckError();
}
}
void finalize_GPU_peer(cudaStream_t *streams){
for (int s = 0; s < NUM_STREAMS; s++) {
cudaStreamDestroy(streams[s]);
cudaCheckError();
}
}
|
cf980705a2b4c57759f93ab654fe7489cce6ff9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpumanager.h"
//The cluster initizalization of kmeans
__global__ void kMeansInit(float *redBuffer, const float *greenBuffer, const float *blueBuffer, const unsigned int size, const int numOfClusters, float *centroids,
float *newCentroids, int *centroidsQuantity, const unsigned int *randomSeeds)
{
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int DIM = 3;
const int indX = globalIdx * DIM;
const int indY = indX + 1;
const int indZ = indY + 1;
int random;
// choose more centers
if (globalIdx < numOfClusters) {
// Getting a random index within each size / number of clusters wide interval
random = globalIdx * (size / numOfClusters) + (randomSeeds[globalIdx] % (size / numOfClusters));
centroids[indX] = redBuffer[random];
centroids[indY] = greenBuffer[random];
centroids[indZ] = blueBuffer[random];
}
centroidsQuantity[globalIdx] = 0;
newCentroids[globalIdx] = 0;
}
// One iteration of kmeans
__global__ void kMeansIteration(float *redBuffer, const float *greenBuffer, const float *blueBuffer, unsigned short *labels, const unsigned int size, const int numOfClusters,
float *centroids)
{
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (globalIdx >= size)
return;
const int DIM = 3;
int localIdx = threadIdx.x;
__shared__ float centroidsLocal[DIM * numOfClusters];
// Copying the centroids of the clusters to shared memory for increased speed
if (localIdx < DIM * numOfClusters) {
centroidsLocal[localIdx] = centroids[localIdx];
}
__syncthreads();
float distance, newDistance, x, y, z;
int centroidsIdx = 0;
x = redBuffer[globalIdx] - centroidsLocal[0];
y = greenBuffer[globalIdx] - centroidsLocal[1];
z = blueBuffer[globalIdx] - centroidsLocal[2];
distance = x * x + y * y + z * z;
// look for a smaller distance in the rest of centroids
for (int j = 1; j < numOfClusters; j++) {
x = redBuffer[globalIdx] - centroidsLocal[j * DIM];
y = greenBuffer[globalIdx] - centroidsLocal[j * DIM + 1];
z = blueBuffer[globalIdx] - centroidsLocal[j * DIM + 2];
newDistance = x * x + y * y + z * z;
if (newDistance < distance) {
centroidsIdx = j;
distance = newDistance;
}
}
labels[globalIdx] = centroidsIdx;
}
// The original kmeans algorithm
__global__ void kMeans(float *redBuffer, const float *greenBuffer, const float *blueBuffer, unsigned short *labels, const unsigned int size, const int numOfClusters,
float *centroids, float *newCentroids, int *centroidsQuantity, float *distanceAccumulation, const unsigned int *randomSeeds)
{
// get index into global data array
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (globalIdx >= size)
return;
// num of dimensions
const int DIM = 3;
const int indX = globalIdx * DIM;
const int indY = indX + 1;
const int indZ = indY + 1;
float *tempPtr;
float distance, newDistance, x, y, z;
int random;
// choose more centers
if (globalIdx < numOfClusters) {
// Getting a random index within each size / number of clusters wide interval
random = globalIdx * (size / numOfClusters) + (randomSeeds[globalIdx] % (size / numOfClusters));
centroids[indX] = redBuffer[random];
centroids[indY] = greenBuffer[random];
centroids[indZ] = blueBuffer[random];
}
//////////////////////////////////////////////////////////////////////////
// Synchronize to make sure all threads are done
__syncthreads();
//////////////////////////////////////////////////////////////////////////
const float epsilon = 1e-4f;
int centroidsIdx;
int numOfIterations = 0;
// Until 5 iterations
while (numOfIterations < 5) {
// Empty all clusters before classification
if (globalIdx >= 0 && globalIdx < numOfClusters) {
centroidsQuantity[globalIdx] = 0;
}
if (globalIdx >= 0 && globalIdx < numOfClusters * DIM) {
newCentroids[globalIdx] = 0;
}
//////////////////////////////////////////////////////////////////////////
// Synchronize to make sure all threads are done
__syncthreads();
//////////////////////////////////////////////////////////////////////////
// Use the estimated means to classify the samples into K clusters
// estimate the distance between colors[Idx] and centroids[0]
centroidsIdx = 0;
x = redBuffer[globalIdx] - centroids[0];
y = greenBuffer[globalIdx] - centroids[1];
z = blueBuffer[globalIdx] - centroids[2];
distance = x * x + y * y + z * z;
// look for a smaller distance in the rest of centroids
for (int j = 1; j < numOfClusters; j++) {
x = redBuffer[globalIdx] - centroids[j * DIM];
y = greenBuffer[globalIdx] - centroids[j * DIM + 1];
z = blueBuffer[globalIdx] - centroids[j * DIM + 2];
newDistance = x * x + y * y + z * z;
if (newDistance < distance) {
centroidsIdx = j;
distance = newDistance;
}
}
labels[globalIdx] = centroidsIdx;
centroidsQuantity[centroidsIdx]++;
newCentroids[centroidsIdx * DIM] += redBuffer[globalIdx];
newCentroids[centroidsIdx * DIM + 1] += greenBuffer[globalIdx];
newCentroids[centroidsIdx * DIM + 2] += blueBuffer[globalIdx];
////////////////////////////////////////////////////////////////////////
//Synchronize to make sure all threads are done
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (globalIdx >= 0 && globalIdx < numOfClusters) {
// estimate the values of the new centers
if (centroidsQuantity[globalIdx] > 0) {
newCentroids[indX] /= centroidsQuantity[globalIdx];
newCentroids[indY] /= centroidsQuantity[globalIdx];
newCentroids[indZ] /= centroidsQuantity[globalIdx];
}
newDistance
= fabs(centroids[indX] - newCentroids[indX])
+ fabs(centroids[indY] - newCentroids[indY])
+ fabs(centroids[indZ] - newCentroids[indZ]);
if (distance > epsilon) {
distanceAccumulation[globalIdx] += 1;
}
}
////////////////////////////////////////////////////////////////////////
//Synchronize to make sure all threads are done
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (globalIdx == 0) {
// swap the new centroids with the old ones
tempPtr = centroids;
centroids = newCentroids;
newCentroids = tempPtr;
}
++numOfIterations;
__syncthreads();
}
}
// Replacing the colors with the center of their cluster
__global__ void convertToKRangeColors(float *redBuffer, float *greenBuffer, float *blueBuffer, const unsigned long int size, float *centroids, unsigned short *labels)
{
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
if(globalIdx >= size)
return;
redBuffer[globalIdx] = centroids[labels[globalIdx] * 3];
greenBuffer[globalIdx] = centroids[labels[globalIdx] * 3 + 1];
blueBuffer[globalIdx] = centroids[labels[globalIdx] * 3 + 2];
}
GpuManager::GpuManager() : numOfClusters(256)
{
}
// Cleaning memory on GPU
void GpuManager::clearBuffers()
{
hipFree(this->dRedBuffer);
hipFree(this->dGreenBuffer);
hipFree(this->dBlueBuffer);
hipFree(this->centroids);
hipFree(this->newCentroids);
hipFree(this->centroidsQuantity);
hipFree(this->distanceAccumulation);
hipFree(this->dSeeds);
hipFree(this->labels);
}
// Allocating memory on GPU
void GpuManager::initBuffers()
{
hipMalloc((void **) &this->dRedBuffer, this->size * sizeof(float));
hipMalloc((void **) &this->dGreenBuffer, this->size * sizeof(float));
hipMalloc((void **) &this->dBlueBuffer, this->size * sizeof(float));
hipMalloc((void **) &this->centroids, 3 * this->numOfClusters * sizeof(float));
hipMalloc((void **) &this->newCentroids, 3 * this->numOfClusters * sizeof(float));
hipMalloc((void **) &this->centroidsQuantity, this->numOfClusters * sizeof(int));
hipMalloc((void **) &this->distanceAccumulation, this->size * sizeof(float));
hipMalloc((void **) &this->dSeeds, this->numOfClusters * sizeof(unsigned int));
hipMalloc((void **) &this->labels, this->size * sizeof(unsigned int));
}
void GpuManager::setSize(const unsigned int inputSize)
{
this->size = inputSize;
}
// Copy the color data from host memory to device memory
void GpuManager::copyDataImageToDevice(float *redBuffer, float *greenBuffer, float *blueBuffer)
{
hipMemcpy(this->dRedBuffer, redBuffer, this->size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(this->dGreenBuffer, greenBuffer, this->size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(this->dBlueBuffer, blueBuffer, this->size * sizeof(float), hipMemcpyHostToDevice);
}
//Generate and copy random numbers for initial seeds to device memory
void GpuManager::generateAndCopyRandomSeedsToDevice()
{
unsigned int *seeds == new unsigned int[this->gpuManager.getNumOfClusters()];
srand( (unsigned)time( NULL ) );
for (int j = 0; j < this->gpuManager.getNumOfClusters(); ++j) {
seeds[j] = rand();
}
hipMemcpy(this->dSeeds, seeds, this->numOfClusters * sizeof(unsigned int), hipMemcpyHostToDevice);
delete[] seeds;
}
// Copy the color data from device memory to host memory
void GpuManager::copyDataFromDevice(float *redBuffer, float *greenBuffer, float *blueBuffer)
{
hipMemcpy(redBuffer, this->dRedBuffer, this->size * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(greenBuffer, this->dGreenBuffer, this->size * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(blueBuffer, this->dBlueBuffer, this->size * sizeof(float), hipMemcpyDeviceToHost);
}
// Copy the colorpalette and the clusterindex of each pixel to host memory
void GpuManager::copyImageDataFromDevice(float *palette, unsigned short *indexes)
{
hipMemcpy(palette, this->centroids, 3 * this->numOfClusters * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(indexes, this->labels, this->size * sizeof(unsigned short), hipMemcpyDeviceToHost);
}
// Executing KMeans and Clustering kernel algorithms
void GpuManager::quantizeImage()
{
// The number of threads within a block (hardware limit is 512 on old GPUs, 1024 on new)
const dim3 blockSize(1024);
// The number of blocks, each will launch a 1024 threads
const dim3 gridSize(this->size/1024 + 1);
hipLaunchKernelGGL(( kMeansInit), dim3(8), dim3(32), 0, 0, this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->size, this->numOfClusters, this->centroids, this->dSeeds);
// kMeans<<<gridSize, blockSize>>>(this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->labels, this->size, this->numOfClusters,
// this->centroids, this->newCentroids, this->centroidsQuantity, this->distanceAccumulation, this->dSeeds);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kMeansIteration), dim3(gridSize), dim3(blockSize), 0, 0, this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->labels, this->size, this->numOfClusters,
this->centroids, this->newCentroids, this->centroidsQuantity);
hipDeviceSynchronize();
//convertToKRangeColors<<<gridSize, blockSize>>>(this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->size, this->centroids, this->labels);
}
int GpuManager::getNumOfClusters()
{
return this->numOfClusters;
}
| cf980705a2b4c57759f93ab654fe7489cce6ff9c.cu | #include "gpumanager.h"
//The cluster initizalization of kmeans
__global__ void kMeansInit(float *redBuffer, const float *greenBuffer, const float *blueBuffer, const unsigned int size, const int numOfClusters, float *centroids,
float *newCentroids, int *centroidsQuantity, const unsigned int *randomSeeds)
{
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int DIM = 3;
const int indX = globalIdx * DIM;
const int indY = indX + 1;
const int indZ = indY + 1;
int random;
// choose more centers
if (globalIdx < numOfClusters) {
// Getting a random index within each size / number of clusters wide interval
random = globalIdx * (size / numOfClusters) + (randomSeeds[globalIdx] % (size / numOfClusters));
centroids[indX] = redBuffer[random];
centroids[indY] = greenBuffer[random];
centroids[indZ] = blueBuffer[random];
}
centroidsQuantity[globalIdx] = 0;
newCentroids[globalIdx] = 0;
}
// One iteration of kmeans
__global__ void kMeansIteration(float *redBuffer, const float *greenBuffer, const float *blueBuffer, unsigned short *labels, const unsigned int size, const int numOfClusters,
float *centroids)
{
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (globalIdx >= size)
return;
const int DIM = 3;
int localIdx = threadIdx.x;
__shared__ float centroidsLocal[DIM * numOfClusters];
// Copying the centroids of the clusters to shared memory for increased speed
if (localIdx < DIM * numOfClusters) {
centroidsLocal[localIdx] = centroids[localIdx];
}
__syncthreads();
float distance, newDistance, x, y, z;
int centroidsIdx = 0;
x = redBuffer[globalIdx] - centroidsLocal[0];
y = greenBuffer[globalIdx] - centroidsLocal[1];
z = blueBuffer[globalIdx] - centroidsLocal[2];
distance = x * x + y * y + z * z;
// look for a smaller distance in the rest of centroids
for (int j = 1; j < numOfClusters; j++) {
x = redBuffer[globalIdx] - centroidsLocal[j * DIM];
y = greenBuffer[globalIdx] - centroidsLocal[j * DIM + 1];
z = blueBuffer[globalIdx] - centroidsLocal[j * DIM + 2];
newDistance = x * x + y * y + z * z;
if (newDistance < distance) {
centroidsIdx = j;
distance = newDistance;
}
}
labels[globalIdx] = centroidsIdx;
}
// The original kmeans algorithm
__global__ void kMeans(float *redBuffer, const float *greenBuffer, const float *blueBuffer, unsigned short *labels, const unsigned int size, const int numOfClusters,
float *centroids, float *newCentroids, int *centroidsQuantity, float *distanceAccumulation, const unsigned int *randomSeeds)
{
// get index into global data array
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (globalIdx >= size)
return;
// num of dimensions
const int DIM = 3;
const int indX = globalIdx * DIM;
const int indY = indX + 1;
const int indZ = indY + 1;
float *tempPtr;
float distance, newDistance, x, y, z;
int random;
// choose more centers
if (globalIdx < numOfClusters) {
// Getting a random index within each size / number of clusters wide interval
random = globalIdx * (size / numOfClusters) + (randomSeeds[globalIdx] % (size / numOfClusters));
centroids[indX] = redBuffer[random];
centroids[indY] = greenBuffer[random];
centroids[indZ] = blueBuffer[random];
}
//////////////////////////////////////////////////////////////////////////
// Synchronize to make sure all threads are done
__syncthreads();
//////////////////////////////////////////////////////////////////////////
const float epsilon = 1e-4f;
int centroidsIdx;
int numOfIterations = 0;
// Until 5 iterations
while (numOfIterations < 5) {
// Empty all clusters before classification
if (globalIdx >= 0 && globalIdx < numOfClusters) {
centroidsQuantity[globalIdx] = 0;
}
if (globalIdx >= 0 && globalIdx < numOfClusters * DIM) {
newCentroids[globalIdx] = 0;
}
//////////////////////////////////////////////////////////////////////////
// Synchronize to make sure all threads are done
__syncthreads();
//////////////////////////////////////////////////////////////////////////
// Use the estimated means to classify the samples into K clusters
// estimate the distance between colors[Idx] and centroids[0]
centroidsIdx = 0;
x = redBuffer[globalIdx] - centroids[0];
y = greenBuffer[globalIdx] - centroids[1];
z = blueBuffer[globalIdx] - centroids[2];
distance = x * x + y * y + z * z;
// look for a smaller distance in the rest of centroids
for (int j = 1; j < numOfClusters; j++) {
x = redBuffer[globalIdx] - centroids[j * DIM];
y = greenBuffer[globalIdx] - centroids[j * DIM + 1];
z = blueBuffer[globalIdx] - centroids[j * DIM + 2];
newDistance = x * x + y * y + z * z;
if (newDistance < distance) {
centroidsIdx = j;
distance = newDistance;
}
}
labels[globalIdx] = centroidsIdx;
centroidsQuantity[centroidsIdx]++;
newCentroids[centroidsIdx * DIM] += redBuffer[globalIdx];
newCentroids[centroidsIdx * DIM + 1] += greenBuffer[globalIdx];
newCentroids[centroidsIdx * DIM + 2] += blueBuffer[globalIdx];
////////////////////////////////////////////////////////////////////////
//Synchronize to make sure all threads are done
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (globalIdx >= 0 && globalIdx < numOfClusters) {
// estimate the values of the new centers
if (centroidsQuantity[globalIdx] > 0) {
newCentroids[indX] /= centroidsQuantity[globalIdx];
newCentroids[indY] /= centroidsQuantity[globalIdx];
newCentroids[indZ] /= centroidsQuantity[globalIdx];
}
newDistance
= fabs(centroids[indX] - newCentroids[indX])
+ fabs(centroids[indY] - newCentroids[indY])
+ fabs(centroids[indZ] - newCentroids[indZ]);
if (distance > epsilon) {
distanceAccumulation[globalIdx] += 1;
}
}
////////////////////////////////////////////////////////////////////////
//Synchronize to make sure all threads are done
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (globalIdx == 0) {
// swap the new centroids with the old ones
tempPtr = centroids;
centroids = newCentroids;
newCentroids = tempPtr;
}
++numOfIterations;
__syncthreads();
}
}
// Replacing the colors with the center of their cluster
__global__ void convertToKRangeColors(float *redBuffer, float *greenBuffer, float *blueBuffer, const unsigned long int size, float *centroids, unsigned short *labels)
{
int globalIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
if(globalIdx >= size)
return;
redBuffer[globalIdx] = centroids[labels[globalIdx] * 3];
greenBuffer[globalIdx] = centroids[labels[globalIdx] * 3 + 1];
blueBuffer[globalIdx] = centroids[labels[globalIdx] * 3 + 2];
}
GpuManager::GpuManager() : numOfClusters(256)
{
}
// Cleaning memory on GPU
void GpuManager::clearBuffers()
{
cudaFree(this->dRedBuffer);
cudaFree(this->dGreenBuffer);
cudaFree(this->dBlueBuffer);
cudaFree(this->centroids);
cudaFree(this->newCentroids);
cudaFree(this->centroidsQuantity);
cudaFree(this->distanceAccumulation);
cudaFree(this->dSeeds);
cudaFree(this->labels);
}
// Allocating memory on GPU
void GpuManager::initBuffers()
{
cudaMalloc((void **) &this->dRedBuffer, this->size * sizeof(float));
cudaMalloc((void **) &this->dGreenBuffer, this->size * sizeof(float));
cudaMalloc((void **) &this->dBlueBuffer, this->size * sizeof(float));
cudaMalloc((void **) &this->centroids, 3 * this->numOfClusters * sizeof(float));
cudaMalloc((void **) &this->newCentroids, 3 * this->numOfClusters * sizeof(float));
cudaMalloc((void **) &this->centroidsQuantity, this->numOfClusters * sizeof(int));
cudaMalloc((void **) &this->distanceAccumulation, this->size * sizeof(float));
cudaMalloc((void **) &this->dSeeds, this->numOfClusters * sizeof(unsigned int));
cudaMalloc((void **) &this->labels, this->size * sizeof(unsigned int));
}
void GpuManager::setSize(const unsigned int inputSize)
{
this->size = inputSize;
}
// Copy the color data from host memory to device memory
void GpuManager::copyDataImageToDevice(float *redBuffer, float *greenBuffer, float *blueBuffer)
{
cudaMemcpy(this->dRedBuffer, redBuffer, this->size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(this->dGreenBuffer, greenBuffer, this->size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(this->dBlueBuffer, blueBuffer, this->size * sizeof(float), cudaMemcpyHostToDevice);
}
//Generate and copy random numbers for initial seeds to device memory
void GpuManager::generateAndCopyRandomSeedsToDevice()
{
unsigned int *seeds == new unsigned int[this->gpuManager.getNumOfClusters()];
srand( (unsigned)time( NULL ) );
for (int j = 0; j < this->gpuManager.getNumOfClusters(); ++j) {
seeds[j] = rand();
}
cudaMemcpy(this->dSeeds, seeds, this->numOfClusters * sizeof(unsigned int), cudaMemcpyHostToDevice);
delete[] seeds;
}
// Copy the color data from device memory to host memory
void GpuManager::copyDataFromDevice(float *redBuffer, float *greenBuffer, float *blueBuffer)
{
cudaMemcpy(redBuffer, this->dRedBuffer, this->size * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(greenBuffer, this->dGreenBuffer, this->size * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(blueBuffer, this->dBlueBuffer, this->size * sizeof(float), cudaMemcpyDeviceToHost);
}
// Copy the colorpalette and the clusterindex of each pixel to host memory.
// Downloads the final centroid colors (3 floats per cluster) into `palette`
// and the per-pixel cluster labels into `indexes`.
void GpuManager::copyImageDataFromDevice(float *palette, unsigned short *indexes)
{
cudaMemcpy(palette, this->centroids, 3 * this->numOfClusters * sizeof(float), cudaMemcpyDeviceToHost);
// NOTE(review): this->labels was allocated as size * sizeof(unsigned int)
// but is copied here as size * sizeof(unsigned short) — if the kernels write
// 32-bit labels this reads only the first half of the buffer reinterpreted
// as 16-bit values. Confirm the label element type used on the device.
cudaMemcpy(indexes, this->labels, this->size * sizeof(unsigned short), cudaMemcpyDeviceToHost);
}
// Executing KMeans and Clustering kernel algorithms.
// Seeds initial centroids on the device, then runs one k-means labelling /
// centroid-update pass over all pixels.
void GpuManager::quantizeImage()
{
// The number of threads within a block (hardware limit is 512 on old GPUs, 1024 on new)
const dim3 blockSize(1024);
// The number of blocks, each will launch a 1024 threads
// (one extra block to cover the remainder when size is not a multiple of 1024).
const dim3 gridSize(this->size/1024 + 1);
// Pick initial centroids from the image using the uploaded random seeds.
// Launch shape 8x32 is fixed — presumably sized to numOfClusters; TODO confirm
// against the kMeansInit implementation.
kMeansInit<<<8, 32>>>(this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->size, this->numOfClusters, this->centroids, this->dSeeds);
// kMeans<<<gridSize, blockSize>>>(this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->labels, this->size, this->numOfClusters,
// this->centroids, this->newCentroids, this->centroidsQuantity, this->distanceAccumulation, this->dSeeds);
cudaDeviceSynchronize();
// One k-means iteration: assign each pixel to its nearest centroid and
// accumulate the data for the updated centroids.
kMeansIteration<<<gridSize, blockSize>>>(this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->labels, this->size, this->numOfClusters,
this->centroids, this->newCentroids, this->centroidsQuantity);
cudaDeviceSynchronize();
//convertToKRangeColors<<<gridSize, blockSize>>>(this->dRedBuffer, this->dGreenBuffer, this->dBlueBuffer, this->size, this->centroids, this->labels);
}
// Accessor: number of clusters (palette entries) used by the quantizer.
int GpuManager::getNumOfClusters()
{
    return numOfClusters;
}
|
ead9c2a34e872d497fee444c953f4db428dfa4fb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point.
// With 4 or 5 command-line arguments the matrix dimensions (argv[1]) and the
// input matrices M (argv[2]) and N (argv[3]) are read from disk; otherwise
// randomly sized/filled matrices are generated. P = M * N is computed on the
// GPU, verified against a CPU reference, and optionally written to disk.
int main(int argc, char** argv) {
    Matrix  M;
    Matrix  N;
    Matrix  P;
    int errorM = 0, errorN = 0;

    srand(52);  // fixed seed so random runs are reproducible

    if(argc != 5 && argc != 4)
    {
        // Allocate and initialize the matrices with random data.
        // N's height must equal M's width for the product to be defined.
        M  = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
        N  = AllocateMatrix(M.width, rand() % 1024, 1);
        P  = AllocateMatrix(M.height, N.width, 0);
    }
    else
    {
        // Allocate and read in matrices from disk.
        int* params = NULL;  // allocated by cutReadFilei
        unsigned int data_read = 3;
        // was: "cutReadFilei(argv[1], ¶ms, ...)" — the address-of expression
        // "&params" had been mangled into a pilcrow character.
        cutReadFilei(argv[1], &params, &data_read, true);
        if(data_read != 3){
            printf("Error reading parameter file\n");
            return 1;
        }
        // params = { M.height, shared dimension, N.width }
        M  = AllocateMatrix(params[0], params[1], 0);
        N  = AllocateMatrix(params[1], params[2], 0);
        P  = AllocateMatrix(params[0], params[2], 0);
        free(params);  // was leaked
        errorM = ReadFile(&M, argv[2]);
        errorN = ReadFile(&N, argv[3]);
        if(errorM || errorN )
        {
            printf("Error reading input files %d, %d\n", errorM, errorN);
            return 1;
        }
    }

    // M * N on the device
    MatrixMulOnDevice(M, N, P);
    printf("GPU computation complete\n");

    // compute the matrix multiplication on the CPU for comparison
    Matrix reference = AllocateMatrix(P.height, P.width, 0);
    computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
    printf("CPU computation complete\n");

    // check if the result is equivalent to the expected solution
    // (element-wise comparison with an absolute tolerance of 0.001)
    CUTBoolean res = cutComparefe(reference.elements, P.elements,
                                  P.height*P.width, 0.001f);
    printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");

    // Optionally write the device result to disk.
    if(argc == 5)
    {
        WriteFile(P, argv[4]);
    }
    else if(argc == 2)
    {
        WriteFile(P, argv[1]);
    }

    // Free matrices
    FreeMatrix(&reference);  // was leaked
    FreeMatrix(&M);
    FreeMatrix(&N);
    FreeMatrix(&P);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Orchestrates the GPU computation of P = M * N: uploads the inputs,
// (is meant to) launch the multiplication kernel, and downloads the result.
// NOTE(review): no execution configuration is set up and no kernel is
// launched here — as written, P comes back exactly as it was uploaded
// (this appears to be an assignment skeleton to be completed).
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
// Launch the device computation threads!
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Create a device-side matrix descriptor with the same dimensions as M and
// allocate (uninitialized) device storage for its elements.
Matrix AllocateDeviceMatrix(const Matrix M)
{
    Matrix deviceCopy = M;
    int numBytes = M.width * M.height * sizeof(float);
    hipMalloc((void**)&deviceCopy.elements, numBytes);
    return deviceCopy;
}
// Allocate a host matrix of dimensions height*width.
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization with values in [0, 3).
// If init == 2, initialize matrix parameters, but do not allocate memory.
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    int size = M.width * M.height;
    M.elements = NULL;
    // don't allocate memory on option 2
    if(init == 2)
        return M;
    M.elements = (float*) malloc(size*sizeof(float));
    for(unsigned int i = 0; i < M.height * M.width; i++)
    {
        // was: rand()*3 / (float)RAND_MAX — the int product rand()*3 can
        // overflow (undefined behavior) when RAND_MAX is large; perform the
        // scaling in floating point instead.
        M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
    }
    return M;
}
// Upload a host matrix's elements into a previously-allocated device matrix,
// refreshing the device descriptor's dimensions from the host copy.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
    int numBytes = Mhost.width * Mhost.height * sizeof(float);
    Mdevice.height = Mhost.height;
    Mdevice.width  = Mhost.width;
    Mdevice.pitch  = Mhost.pitch;
    hipMemcpy(Mdevice.elements, Mhost.elements, numBytes, hipMemcpyHostToDevice);
}
// Download a device matrix's elements into a host matrix of the same size.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    int numBytes = Mdevice.width * Mdevice.height * sizeof(float);
    hipMemcpy(Mhost.elements, Mdevice.elements, numBytes, hipMemcpyDeviceToHost);
}
// Release a matrix's device element storage and null the dangling pointer.
void FreeDeviceMatrix(Matrix* M)
{
    if (M->elements) {
        hipFree(M->elements);
    }
    M->elements = NULL;
}
// Release a matrix's host element storage and null the dangling pointer.
void FreeMatrix(Matrix* M)
{
    if (M->elements) {
        free(M->elements);
    }
    M->elements = NULL;
}
// Read a floating point matrix in from file.
// Returns zero if the number of elements read equals M.height * M.width,
// and 1 otherwise.
int ReadFile(Matrix* M, char* file_name)
{
// Request exactly height*width values; cutReadFilef updates data_read with
// the count actually read (and allocates/fills M->elements).
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a floating point matrix to file (any size, not just 16x16;
// values are written with an epsilon of 0.0001).
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
| ead9c2a34e872d497fee444c953f4db428dfa4fb.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point.
// With 4 or 5 command-line arguments the matrix dimensions (argv[1]) and the
// input matrices M (argv[2]) and N (argv[3]) are read from disk; otherwise
// randomly sized/filled matrices are generated. P = M * N is computed on the
// GPU, verified against a CPU reference, and optionally written to disk.
int main(int argc, char** argv) {
    Matrix  M;
    Matrix  N;
    Matrix  P;
    int errorM = 0, errorN = 0;

    srand(52);  // fixed seed so random runs are reproducible

    if(argc != 5 && argc != 4)
    {
        // Allocate and initialize the matrices with random data.
        // N's height must equal M's width for the product to be defined.
        M  = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
        N  = AllocateMatrix(M.width, rand() % 1024, 1);
        P  = AllocateMatrix(M.height, N.width, 0);
    }
    else
    {
        // Allocate and read in matrices from disk.
        int* params = NULL;  // allocated by cutReadFilei
        unsigned int data_read = 3;
        // was: "cutReadFilei(argv[1], ¶ms, ...)" — the address-of expression
        // "&params" had been mangled into a pilcrow character.
        cutReadFilei(argv[1], &params, &data_read, true);
        if(data_read != 3){
            printf("Error reading parameter file\n");
            return 1;
        }
        // params = { M.height, shared dimension, N.width }
        M  = AllocateMatrix(params[0], params[1], 0);
        N  = AllocateMatrix(params[1], params[2], 0);
        P  = AllocateMatrix(params[0], params[2], 0);
        free(params);  // was leaked
        errorM = ReadFile(&M, argv[2]);
        errorN = ReadFile(&N, argv[3]);
        if(errorM || errorN )
        {
            printf("Error reading input files %d, %d\n", errorM, errorN);
            return 1;
        }
    }

    // M * N on the device
    MatrixMulOnDevice(M, N, P);
    printf("GPU computation complete\n");

    // compute the matrix multiplication on the CPU for comparison
    Matrix reference = AllocateMatrix(P.height, P.width, 0);
    computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
    printf("CPU computation complete\n");

    // check if the result is equivalent to the expected solution
    // (element-wise comparison with an absolute tolerance of 0.001)
    CUTBoolean res = cutComparefe(reference.elements, P.elements,
                                  P.height*P.width, 0.001f);
    printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");

    // Optionally write the device result to disk.
    if(argc == 5)
    {
        WriteFile(P, argv[4]);
    }
    else if(argc == 2)
    {
        WriteFile(P, argv[1]);
    }

    // Free matrices
    FreeMatrix(&reference);  // was leaked
    FreeMatrix(&M);
    FreeMatrix(&N);
    FreeMatrix(&P);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Orchestrates the GPU computation of P = M * N: uploads the inputs,
// (is meant to) launch the multiplication kernel, and downloads the result.
// NOTE(review): no execution configuration is set up and no kernel is
// launched here — as written, P comes back exactly as it was uploaded
// (this appears to be an assignment skeleton to be completed).
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
// Launch the device computation threads!
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Create a device-side matrix descriptor with the same dimensions as M and
// allocate (uninitialized) device storage for its elements.
Matrix AllocateDeviceMatrix(const Matrix M)
{
    Matrix deviceCopy = M;
    int numBytes = M.width * M.height * sizeof(float);
    cudaMalloc((void**)&deviceCopy.elements, numBytes);
    return deviceCopy;
}
// Allocate a host matrix of dimensions height*width.
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization with values in [0, 3).
// If init == 2, initialize matrix parameters, but do not allocate memory.
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    int size = M.width * M.height;
    M.elements = NULL;
    // don't allocate memory on option 2
    if(init == 2)
        return M;
    M.elements = (float*) malloc(size*sizeof(float));
    for(unsigned int i = 0; i < M.height * M.width; i++)
    {
        // was: rand()*3 / (float)RAND_MAX — the int product rand()*3 can
        // overflow (undefined behavior) when RAND_MAX is large; perform the
        // scaling in floating point instead.
        M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
    }
    return M;
}
// Upload a host matrix's elements into a previously-allocated device matrix,
// refreshing the device descriptor's dimensions from the host copy.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
    int numBytes = Mhost.width * Mhost.height * sizeof(float);
    Mdevice.height = Mhost.height;
    Mdevice.width  = Mhost.width;
    Mdevice.pitch  = Mhost.pitch;
    cudaMemcpy(Mdevice.elements, Mhost.elements, numBytes, cudaMemcpyHostToDevice);
}
// Download a device matrix's elements into a host matrix of the same size.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    int numBytes = Mdevice.width * Mdevice.height * sizeof(float);
    cudaMemcpy(Mhost.elements, Mdevice.elements, numBytes, cudaMemcpyDeviceToHost);
}
// Release a matrix's device element storage and null the dangling pointer.
void FreeDeviceMatrix(Matrix* M)
{
    if (M->elements) {
        cudaFree(M->elements);
    }
    M->elements = NULL;
}
// Release a matrix's host element storage and null the dangling pointer.
void FreeMatrix(Matrix* M)
{
    if (M->elements) {
        free(M->elements);
    }
    M->elements = NULL;
}
// Read a floating point matrix in from file.
// Returns zero if the number of elements read equals M.height * M.width,
// and 1 otherwise.
int ReadFile(Matrix* M, char* file_name)
{
// Request exactly height*width values; cutReadFilef updates data_read with
// the count actually read (and allocates/fills M->elements).
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a floating point matrix to file (any size, not just 16x16;
// values are written with an epsilon of 0.0001).
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
|
47a20649bcbcd19915aa825d1a4b7577757a837f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_calc_gjL_2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Micro-benchmark driver for kernel_calc_gjL_2: for each matrix size and each
// of 20 launch configurations, warms the kernel up (1 + 10 launches), times
// 1000 back-to-back launches, and prints [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1] = number of entries of matrices_ to sweep.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int layer_id = 1;
// Device buffers passed to the kernel. NOTE(review): hipMalloc sizes are in
// BYTES, so XSIZE*YSIZE here allocates XSIZE*YSIZE bytes, not elements —
// likely intended to be XSIZE*YSIZE*sizeof(type); confirm against the kernel's
// access pattern. Buffers are never freed (leaked each iteration).
int *l = NULL;
hipMalloc(&l, XSIZE*YSIZE);
int *s_ext = NULL;
hipMalloc(&s_ext, XSIZE*YSIZE);
int *sw_ext = NULL;
hipMalloc(&sw_ext, XSIZE*YSIZE);
float *z_ext_arr = NULL;
hipMalloc(&z_ext_arr, XSIZE*YSIZE);
float *a_ext_arr = NULL;
hipMalloc(&a_ext_arr, XSIZE*YSIZE);
float *t_arr = NULL;
hipMalloc(&t_arr, XSIZE*YSIZE);
float *gjl_ext = NULL;
hipMalloc(&gjl_ext, XSIZE*YSIZE);
float *w_ext_arr = NULL;
hipMalloc(&w_ext_arr, XSIZE*YSIZE);
// Round the problem size up to a multiple of the block dimensions so the
// grid covers it exactly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) establishes the context; first launch absorbs one-time costs.
hipFree(0);hipLaunchKernelGGL((
kernel_calc_gjL_2), dim3(gridBlock),dim3(threadBlock), 0, 0, layer_id,l,s_ext,sw_ext,z_ext_arr,a_ext_arr,t_arr,gjl_ext,w_ext_arr);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_calc_gjL_2), dim3(gridBlock),dim3(threadBlock), 0, 0, layer_id,l,s_ext,sw_ext,z_ext_arr,a_ext_arr,t_arr,gjl_ext,w_ext_arr);
}
// Timed region: 1000 asynchronous launches. NOTE(review): there is no
// hipDeviceSynchronize() before reading the clock, so the measurement is of
// launch submission plus whatever work completes — confirm intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_calc_gjL_2), dim3(gridBlock),dim3(threadBlock), 0, 0, layer_id,l,s_ext,sw_ext,z_ext_arr,a_ext_arr,t_arr,gjl_ext,w_ext_arr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 47a20649bcbcd19915aa825d1a4b7577757a837f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_calc_gjL_2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Micro-benchmark driver for kernel_calc_gjL_2: for each matrix size and each
// of 20 launch configurations, warms the kernel up (1 + 10 launches), times
// 1000 back-to-back launches, and prints [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1] = number of entries of matrices_ to sweep.
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int layer_id = 1;
// Device buffers passed to the kernel. NOTE(review): cudaMalloc sizes are in
// BYTES, so XSIZE*YSIZE here allocates XSIZE*YSIZE bytes, not elements —
// likely intended to be XSIZE*YSIZE*sizeof(type); confirm against the kernel's
// access pattern. Buffers are never freed (leaked each iteration).
int *l = NULL;
cudaMalloc(&l, XSIZE*YSIZE);
int *s_ext = NULL;
cudaMalloc(&s_ext, XSIZE*YSIZE);
int *sw_ext = NULL;
cudaMalloc(&sw_ext, XSIZE*YSIZE);
float *z_ext_arr = NULL;
cudaMalloc(&z_ext_arr, XSIZE*YSIZE);
float *a_ext_arr = NULL;
cudaMalloc(&a_ext_arr, XSIZE*YSIZE);
float *t_arr = NULL;
cudaMalloc(&t_arr, XSIZE*YSIZE);
float *gjl_ext = NULL;
cudaMalloc(&gjl_ext, XSIZE*YSIZE);
float *w_ext_arr = NULL;
cudaMalloc(&w_ext_arr, XSIZE*YSIZE);
// Round the problem size up to a multiple of the block dimensions so the
// grid covers it exactly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) establishes the context; first launch absorbs one-time costs.
cudaFree(0);
kernel_calc_gjL_2<<<gridBlock,threadBlock>>>(layer_id,l,s_ext,sw_ext,z_ext_arr,a_ext_arr,t_arr,gjl_ext,w_ext_arr);
cudaDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_calc_gjL_2<<<gridBlock,threadBlock>>>(layer_id,l,s_ext,sw_ext,z_ext_arr,a_ext_arr,t_arr,gjl_ext,w_ext_arr);
}
// Timed region: 1000 asynchronous launches. NOTE(review): there is no
// cudaDeviceSynchronize() before reading the clock, so the measurement is of
// launch submission plus whatever work completes — confirm intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_calc_gjL_2<<<gridBlock,threadBlock>>>(layer_id,l,s_ext,sw_ext,z_ext_arr,a_ext_arr,t_arr,gjl_ext,w_ext_arr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
d2b2ce2438d8609313ab2f5e448144856b8410fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
// Element-wise scalar multiply: result[id] = x * y[id] for every id < n.
// Supports 2D grids/blocks: the (x, y) launch coordinates are flattened into a
// single linear index, where each y-row spans gridDim.x * blockDim.x threads.
__global__ void vec_scalarMul (int n, double *result, double x, double *y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
// Flatten 2D coordinates to one linear element index.
int id = idy * gridDim.x * blockDim.x + idx;
// Bounds guard: the grid may overshoot n.
if (id < n)
{
result[id] = x * y[id];
}
} | d2b2ce2438d8609313ab2f5e448144856b8410fa.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
// Element-wise scalar multiply: result[id] = x * y[id] for every id < n.
// Supports 2D grids/blocks: the (x, y) launch coordinates are flattened into a
// single linear index, where each y-row spans gridDim.x * blockDim.x threads.
__global__ void vec_scalarMul (int n, double *result, double x, double *y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
// Flatten 2D coordinates to one linear element index.
int id = idy * gridDim.x * blockDim.x + idx;
// Bounds guard: the grid may overshoot n.
if (id < n)
{
result[id] = x * y[id];
}
} |
23429fe59dcbe61be70607a4e47b3208fc78c1e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2013-2014, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "convert/oskar_convert_station_uvw_to_baseline_uvw_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
/* Host wrapper (single precision): converts per-station (u,v,w) coordinates
 * to per-baseline (uu,vv,ww) differences on the GPU. All pointers are device
 * memory. Launch configuration comes from OSKAR_CUDAK_CONF (defined in the
 * included header) -- presumably num_stations blocks of 32 threads, since
 * the kernel selects the first station via blockIdx.x; confirm against the
 * macro definition. */
void oskar_convert_station_uvw_to_baseline_uvw_cuda_f(int num_stations,
const float* d_u, const float* d_v, const float* d_w, float* d_uu,
float* d_vv, float* d_ww)
{
int num_threads = 32;
oskar_convert_station_uvw_to_baseline_uvw_cudak_f
OSKAR_CUDAK_CONF(num_stations, num_threads)
(num_stations, d_u, d_v, d_w, d_uu, d_vv, d_ww);
}
/* Double precision. */
/* Host wrapper (double precision): converts per-station (u,v,w) coordinates
 * to per-baseline (uu,vv,ww) differences on the GPU; all pointers are
 * device memory. Launch configuration is supplied by OSKAR_CUDAK_CONF
 * (defined elsewhere). */
void oskar_convert_station_uvw_to_baseline_uvw_cuda_d(int num_stations,
const double* d_u, const double* d_v, const double* d_w, double* d_uu,
double* d_vv, double* d_ww)
{
int num_threads = 32;
oskar_convert_station_uvw_to_baseline_uvw_cudak_d
OSKAR_CUDAK_CONF(num_stations, num_threads)
(num_stations, d_u, d_v, d_w, d_uu, d_vv, d_ww);
}
/* Kernels. ================================================================ */
/* Single precision. */
/* Kernel (single precision): writes one (uu,vv,ww) baseline per handled
 * pair of stations. blockIdx.x selects the first station of every pair;
 * the block's threads stride over all second stations greater than it. */
__global__
void oskar_convert_station_uvw_to_baseline_uvw_cudak_f(int num_stations,
        const float* u, const float* v, const float* w, float* uu,
        float* vv, float* ww)
{
    /* First station of every baseline this block produces. */
    const int a = blockIdx.x;
    /* Stride over second stations b2 > a, one baseline per iteration. */
    for (int b2 = a + 1 + threadIdx.x; b2 < num_stations; b2 += blockDim.x)
    {
        /* Flattened index of pair (a, b2) in the packed upper triangle. */
        const int idx = a * (num_stations - 1) - (a - 1) * a/2 + b2 - a - 1;
        uu[idx] = u[b2] - u[a];
        vv[idx] = v[b2] - v[a];
        ww[idx] = w[b2] - w[a];
    }
}
/* Double precision. */
/* Kernel (double precision): one block per first station s1 (= blockIdx.x);
 * threads stride over second stations s2 > s1, writing one baseline each. */
__global__
void oskar_convert_station_uvw_to_baseline_uvw_cudak_d(int num_stations,
const double* u, const double* v, const double* w, double* uu,
double* vv, double* ww)
{
/* Get first station index from block ID. */
int s1 = blockIdx.x;
/* Each thread does one baseline. */
for (int s2 = s1 + threadIdx.x + 1; s2 < num_stations; s2 += blockDim.x)
{
/* Determine baseline index from station IDs: packed upper-triangle
 * position of the pair (s1, s2). */
int b = s1 * (num_stations - 1) - (s1 - 1) * s1/2 + s2 - s1 - 1;
/* Compute baselines. */
uu[b] = u[s2] - u[s1];
vv[b] = v[s2] - v[s1];
ww[b] = w[s2] - w[s1];
}
}
#ifdef __cplusplus
}
#endif
| 23429fe59dcbe61be70607a4e47b3208fc78c1e9.cu | /*
* Copyright (c) 2013-2014, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "convert/oskar_convert_station_uvw_to_baseline_uvw_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
/* Host wrapper (single precision): converts per-station (u,v,w) coordinates
 * to per-baseline (uu,vv,ww) differences on the GPU; all pointers are
 * device memory. Launch configuration is supplied by OSKAR_CUDAK_CONF
 * (defined elsewhere). */
void oskar_convert_station_uvw_to_baseline_uvw_cuda_f(int num_stations,
const float* d_u, const float* d_v, const float* d_w, float* d_uu,
float* d_vv, float* d_ww)
{
int num_threads = 32;
oskar_convert_station_uvw_to_baseline_uvw_cudak_f
OSKAR_CUDAK_CONF(num_stations, num_threads)
(num_stations, d_u, d_v, d_w, d_uu, d_vv, d_ww);
}
/* Double precision. */
/* Host wrapper (double precision): converts per-station (u,v,w) coordinates
 * to per-baseline (uu,vv,ww) differences on the GPU; all pointers are
 * device memory. Launch configuration is supplied by OSKAR_CUDAK_CONF
 * (defined elsewhere). */
void oskar_convert_station_uvw_to_baseline_uvw_cuda_d(int num_stations,
const double* d_u, const double* d_v, const double* d_w, double* d_uu,
double* d_vv, double* d_ww)
{
int num_threads = 32;
oskar_convert_station_uvw_to_baseline_uvw_cudak_d
OSKAR_CUDAK_CONF(num_stations, num_threads)
(num_stations, d_u, d_v, d_w, d_uu, d_vv, d_ww);
}
/* Kernels. ================================================================ */
/* Single precision. */
/* Kernel (single precision): writes one (uu,vv,ww) baseline per handled
 * pair of stations. blockIdx.x selects the first station of every pair;
 * the block's threads stride over all second stations greater than it. */
__global__
void oskar_convert_station_uvw_to_baseline_uvw_cudak_f(int num_stations,
        const float* u, const float* v, const float* w, float* uu,
        float* vv, float* ww)
{
    /* First station of every baseline this block produces. */
    const int a = blockIdx.x;
    /* Stride over second stations b2 > a, one baseline per iteration. */
    for (int b2 = a + 1 + threadIdx.x; b2 < num_stations; b2 += blockDim.x)
    {
        /* Flattened index of pair (a, b2) in the packed upper triangle. */
        const int idx = a * (num_stations - 1) - (a - 1) * a/2 + b2 - a - 1;
        uu[idx] = u[b2] - u[a];
        vv[idx] = v[b2] - v[a];
        ww[idx] = w[b2] - w[a];
    }
}
/* Double precision. */
/* Kernel (double precision): one block per first station s1 (= blockIdx.x);
 * threads stride over second stations s2 > s1, writing one baseline each. */
__global__
void oskar_convert_station_uvw_to_baseline_uvw_cudak_d(int num_stations,
const double* u, const double* v, const double* w, double* uu,
double* vv, double* ww)
{
/* Get first station index from block ID. */
int s1 = blockIdx.x;
/* Each thread does one baseline. */
for (int s2 = s1 + threadIdx.x + 1; s2 < num_stations; s2 += blockDim.x)
{
/* Determine baseline index from station IDs: packed upper-triangle
 * position of the pair (s1, s2). */
int b = s1 * (num_stations - 1) - (s1 - 1) * s1/2 + s2 - s1 - 1;
/* Compute baselines. */
uu[b] = u[s2] - u[s1];
vv[b] = v[s2] - v[s1];
ww[b] = w[s2] - w[s1];
}
}
#ifdef __cplusplus
}
#endif
|
d782c5ff2726f46296f807acbf80803fd00baa87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* -- LAPACK routine (version 3.2) --
* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
* November 2006
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N
* ..
* .. Array Arguments ..
INTEGER IPIV( * )
DOUBLE PRECISION A( LDA, * )
* ..
*
* Purpose
* =======
*
* DGETF2 computes an LU factorization of a general m-by-n matrix A
* using partial pivoting with row interchanges.
*
* The factorization has the form
* A = P * L * U
* where P is a permutation matrix, L is lower triangular with unit
* diagonal elements (lower trapezoidal if m > n), and U is upper
* triangular (upper trapezoidal if m < n).
*
* This is the right-looking Level 2 BLAS version of the algorithm.
*
* Arguments
* =========
*
* M (input) INTEGER
* The number of rows of the matrix A. M >= 0.
*
* N (input) INTEGER
* The number of columns of the matrix A. N >= 0.
*
* A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
* On entry, the m by n matrix to be factored.
* On exit, the factors L and U from the factorization
* A = P*L*U; the unit diagonal elements of L are not stored.
*
* LDA (input) INTEGER
* The leading dimension of the array A. LDA >= max(1,M).
*
* IPIV (output) INTEGER array, dimension (min(M,N))
* The pivot indices; for 1 <= i <= min(M,N), row i of the
* matrix was interchanged with row IPIV(i).
*
* INFO (output) INTEGER
* = 0: successful exit
* < 0: if INFO = -k, the k-th argument had an illegal value
* > 0: if INFO = k, U(k,k) is exactly zero. The factorization
* has been completed, but the factor U is exactly
* singular, and division by zero will occur if it is used
* to solve a system of equations.
*
*/
#include <stdio.h>
#include <rocblas.h>
/* Input error reporting function, C version */
/* Device-side analogue of LAPACK's XERBLA: reports which argument of the
 * named routine was invalid. */
__device__ void report_error(const char *strName, int info)
{
    const char *fmt = " ** On entry to %s parameter number %d had an illegal value\n";
    printf(fmt, strName, info);
}
/*
 * Device-side LU factorization with partial pivoting of an m-by-n
 * column-major matrix A (leading dimension lda), right-looking Level-2 BLAS
 * form, mirroring LAPACK DGETF2 but issuing the BLAS operations through the
 * device hipBLAS API.
 *
 * Must be reached by every thread of the block (it contains block-wide
 * barriers); only threadIdx.x == 0 performs the BLAS calls.
 *
 * On exit: *info == 0 on success; -k if argument k was illegal; k > 0 if
 * pivot U(k,k) was exactly zero (this version aborts there, unlike LAPACK
 * which completes the factorization). ipiv[j] receives the 0-based row
 * swapped with row j.
 */
__device__ __noinline__ void dgetf2(hipblasHandle_t cb_handle, int m, int n, double *A, int lda, int *ipiv, int *info)
{
    hipblasStatus_t status;
    /* Failure flag set by thread 0 and read by the whole block. */
    __shared__ int s_info;
    if (threadIdx.x == 0)
        s_info = 0;
    __syncthreads();
    /* Basic argument checking (mirrors LAPACK's negative-INFO convention). */
    *info = 0;
    if (m < 0)
    {
        *info = -1;
    }
    if (n < 0)
    {
        *info = -2;
    }
    if (lda < max(1, m))
    {
        *info = -4;
    }
    if (*info)
    {
        report_error("DGETF2", *info);
        return;
    }
    /* Quick return if possible. */
    if (m == 0 || n == 0)
    {
        return;
    }
    const int minDim = min(m, n);
    for (int j = 0; j < minDim; j++)
    {
        int jp = 0;
        if (threadIdx.x == 0)
        {
            /* Find the pivot: row with largest |A(i,j)| for i in [j, m). */
            status = hipblasIdamax(cb_handle, m-j, &A[j*lda + j], 1, &jp);
            if (status != HIPBLAS_STATUS_SUCCESS)
            {
                printf("Failed idamax: %d\n", status);
                s_info = 1;
            }
            jp += j-1; /* hipblasIdamax is 1-indexed; convert to 0-based absolute row. */
            ipiv[j] = jp;
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
        /* Load the pivot value A(jp, j); meaningful only on thread 0. */
        double rowval = threadIdx.x == 0 ? A[j*lda + jp] : 0.0;
        /* Swap rows j and jp if the pivot is nonzero and off-diagonal. */
        if (threadIdx.x == 0 && rowval != 0.0 && jp != j)
        {
            status = hipblasDswap(cb_handle, n, &A[j], lda, &A[jp], lda);
            if (status != HIPBLAS_STATUS_SUCCESS)
            {
                printf("Failed dswap: %d\n", status);
                s_info = 1;
            }
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
        /* Scale column j below the diagonal by 1/pivot.
         * BUG FIX: the original's zero-pivot branch re-tested
         * "rowval != 0.0" inside the else, so it could never fire and a
         * singular pivot was silently reported as success. A zero pivot
         * now sets info = j+1 (LAPACK's "U(k,k) is exactly zero"). */
        if (threadIdx.x == 0)
        {
            if (rowval != 0.0)
            {
                double scale = 1.0 / rowval;
                status = hipblasDscal(cb_handle, m-j-1, &scale, &A[j*lda + j+1], 1);
                if (status != HIPBLAS_STATUS_SUCCESS)
                {
                    printf("Failed dscal: %d\n", status);
                    s_info = 1;
                }
            }
            else
            {
                s_info = j + 1;
            }
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
        if (threadIdx.x == 0 && j < minDim)
        {
            /* Rank-1 update of the trailing (m-j-1)-by-(n-j-1) submatrix. */
            double alpha = -1.0;
            status = hipblasDger(cb_handle, m-j-1, n-j-1, &alpha, &A[j*lda + j+1], 1, &A[(j+1)*lda + j], lda, &A[(j+1)*lda + j+1], lda);
            if (status != HIPBLAS_STATUS_SUCCESS)
            {
                printf("Failed dger: %d\n", status);
                s_info = 1;
            }
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
    }
}
| d782c5ff2726f46296f807acbf80803fd00baa87.cu | /*
* -- LAPACK routine (version 3.2) --
* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
* November 2006
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N
* ..
* .. Array Arguments ..
INTEGER IPIV( * )
DOUBLE PRECISION A( LDA, * )
* ..
*
* Purpose
* =======
*
* DGETF2 computes an LU factorization of a general m-by-n matrix A
* using partial pivoting with row interchanges.
*
* The factorization has the form
* A = P * L * U
* where P is a permutation matrix, L is lower triangular with unit
* diagonal elements (lower trapezoidal if m > n), and U is upper
* triangular (upper trapezoidal if m < n).
*
* This is the right-looking Level 2 BLAS version of the algorithm.
*
* Arguments
* =========
*
* M (input) INTEGER
* The number of rows of the matrix A. M >= 0.
*
* N (input) INTEGER
* The number of columns of the matrix A. N >= 0.
*
* A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
* On entry, the m by n matrix to be factored.
* On exit, the factors L and U from the factorization
* A = P*L*U; the unit diagonal elements of L are not stored.
*
* LDA (input) INTEGER
* The leading dimension of the array A. LDA >= max(1,M).
*
* IPIV (output) INTEGER array, dimension (min(M,N))
* The pivot indices; for 1 <= i <= min(M,N), row i of the
* matrix was interchanged with row IPIV(i).
*
* INFO (output) INTEGER
* = 0: successful exit
* < 0: if INFO = -k, the k-th argument had an illegal value
* > 0: if INFO = k, U(k,k) is exactly zero. The factorization
* has been completed, but the factor U is exactly
* singular, and division by zero will occur if it is used
* to solve a system of equations.
*
*/
#include <stdio.h>
#include <cublas_v2.h>
/* Input error reporting function, C version */
/* Device-side analogue of LAPACK's XERBLA: reports which argument of the
 * named routine was invalid. */
__device__ void report_error(const char *strName, int info)
{
printf(" ** On entry to %s parameter number %d had an illegal value\n", strName, info);
}
/*
 * Device-side LU factorization with partial pivoting of an m-by-n
 * column-major matrix A (leading dimension lda), right-looking Level-2 BLAS
 * form, mirroring LAPACK DGETF2 but issuing the BLAS operations through the
 * device cuBLAS API.
 *
 * Must be reached by every thread of the block (it contains block-wide
 * barriers); only threadIdx.x == 0 performs the BLAS calls.
 *
 * On exit: *info == 0 on success; -k if argument k was illegal; k > 0 if
 * pivot U(k,k) was exactly zero (this version aborts there, unlike LAPACK
 * which completes the factorization). ipiv[j] receives the 0-based row
 * swapped with row j.
 */
__device__ __noinline__ void dgetf2(cublasHandle_t cb_handle, int m, int n, double *A, int lda, int *ipiv, int *info)
{
    cublasStatus_t status;
    /* Failure flag set by thread 0 and read by the whole block. */
    __shared__ int s_info;
    if (threadIdx.x == 0)
        s_info = 0;
    __syncthreads();
    /* Basic argument checking (mirrors LAPACK's negative-INFO convention). */
    *info = 0;
    if (m < 0)
    {
        *info = -1;
    }
    if (n < 0)
    {
        *info = -2;
    }
    if (lda < max(1, m))
    {
        *info = -4;
    }
    if (*info)
    {
        report_error("DGETF2", *info);
        return;
    }
    /* Quick return if possible. */
    if (m == 0 || n == 0)
    {
        return;
    }
    const int minDim = min(m, n);
    for (int j = 0; j < minDim; j++)
    {
        int jp = 0;
        if (threadIdx.x == 0)
        {
            /* Find the pivot: row with largest |A(i,j)| for i in [j, m). */
            status = cublasIdamax_v2(cb_handle, m-j, &A[j*lda + j], 1, &jp);
            if (status != CUBLAS_STATUS_SUCCESS)
            {
                printf("Failed idamax: %d\n", status);
                s_info = 1;
            }
            jp += j-1; /* cublasIdamax_v2 is 1-indexed; convert to 0-based absolute row. */
            ipiv[j] = jp;
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
        /* Load the pivot value A(jp, j); meaningful only on thread 0. */
        double rowval = threadIdx.x == 0 ? A[j*lda + jp] : 0.0;
        /* Swap rows j and jp if the pivot is nonzero and off-diagonal. */
        if (threadIdx.x == 0 && rowval != 0.0 && jp != j)
        {
            status = cublasDswap_v2(cb_handle, n, &A[j], lda, &A[jp], lda);
            if (status != CUBLAS_STATUS_SUCCESS)
            {
                printf("Failed dswap: %d\n", status);
                s_info = 1;
            }
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
        /* Scale column j below the diagonal by 1/pivot.
         * BUG FIX: the original's zero-pivot branch re-tested
         * "rowval != 0.0" inside the else, so it could never fire and a
         * singular pivot was silently reported as success. A zero pivot
         * now sets info = j+1 (LAPACK's "U(k,k) is exactly zero"). */
        if (threadIdx.x == 0)
        {
            if (rowval != 0.0)
            {
                double scale = 1.0 / rowval;
                status = cublasDscal_v2(cb_handle, m-j-1, &scale, &A[j*lda + j+1], 1);
                if (status != CUBLAS_STATUS_SUCCESS)
                {
                    printf("Failed dscal: %d\n", status);
                    s_info = 1;
                }
            }
            else
            {
                s_info = j + 1;
            }
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
        if (threadIdx.x == 0 && j < minDim)
        {
            /* Rank-1 update of the trailing (m-j-1)-by-(n-j-1) submatrix. */
            double alpha = -1.0;
            status = cublasDger_v2(cb_handle, m-j-1, n-j-1, &alpha, &A[j*lda + j+1], 1, &A[(j+1)*lda + j], lda, &A[(j+1)*lda + j+1], lda);
            if (status != CUBLAS_STATUS_SUCCESS)
            {
                printf("Failed dger: %d\n", status);
                s_info = 1;
            }
        }
        __syncthreads();
        if (s_info)
        {
            *info = s_info;
            return;
        }
    }
}
|
48ee9dd5fe753bbe1ec404ae21aaa48d9831a58c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
#define LOG_NUM_BANKS 5
#define NUM_BANKS 32
#define BLOCK_SIZE 64
#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
/* Prints file/line context for a failed HIP/CUDA API call and (by default)
 * terminates the process; used through the cudaCheckError() macro. */
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
/* Maps a logical shared-memory index to a physical one. The bank-conflict
 * padding (commented-out form) is currently disabled, so this is the
 * identity map. */
__device__ inline size_t NoConflictIndex(size_t index) {
return index;
// return index + (index >> LOG_NUM_BANKS);
}
/*
 * Blelloch work-efficient exclusive scan of one block's 2*blockDim.x
 * elements in dynamic shared memory; the block's total is written to
 * block_sums[blockIdx.x] for a later second-level scan.
 * Requires dynamic shared memory for at least 2*blockDim.x floats.
 *
 * NOTE(review): the early return below sits BEFORE the __syncthreads()
 * barriers -- a divergent barrier is undefined behavior whenever data_size
 * is not a multiple of the launch width. Also, since each thread loads TWO
 * elements, the guard should presumably test 2*thread_id_global against
 * data_size -- confirm against the launch in TotalPrescanGPU.
 */
__global__ void PrescanBlocks(float * out_data, const float * in_data, float * block_sums, const size_t data_size) {
// keeps all the in_data during processing
extern __shared__ float in_data_shared[];
size_t thread_id_local = threadIdx.x;
size_t offset = 1;
size_t thread_id_global = blockIdx.x * blockDim.x + thread_id_local;
if (thread_id_global >= data_size) {
return;
}
/* Each thread stages two input elements into shared memory. */
in_data_shared[NoConflictIndex(2 * thread_id_local)] = in_data[2 * thread_id_global];
in_data_shared[NoConflictIndex(2 * thread_id_local + 1)] = in_data[2 * thread_id_global + 1];
/* Up-sweep (reduce) phase: build partial sums up the binary tree. */
for (size_t level_size = 2 * blockDim.x >> 1; level_size > 0; level_size >>= 1) {
__syncthreads();
if (thread_id_local < level_size) {
size_t left_son_idx = offset * (2 * thread_id_local + 1) - 1;
size_t parent_idx = offset * (2 * thread_id_local + 2) - 1;
in_data_shared[NoConflictIndex(parent_idx)] += in_data_shared[NoConflictIndex(left_son_idx)];
}
offset *= 2;
}
/* Record the block total, then clear the root for the exclusive scan. */
if (thread_id_local == 0) {
block_sums[blockIdx.x] = in_data_shared[NoConflictIndex(blockDim.x * 2 - 1)];
in_data_shared[NoConflictIndex(blockDim.x * 2 - 1)] = 0;
}
/* Down-sweep phase: traverse back down, swapping and accumulating. */
for (size_t level_size = 1; level_size < 2 * blockDim.x; level_size *= 2) {
offset >>= 1;
__syncthreads();
if (thread_id_local < level_size) {
size_t left_son_idx = offset * (2 * thread_id_local + 1) - 1;
size_t parent_idx = offset * (2 * thread_id_local + 2) - 1;
float left_son_value = in_data_shared[NoConflictIndex(left_son_idx)];
in_data_shared[NoConflictIndex(left_son_idx)] = in_data_shared[NoConflictIndex(parent_idx)];
in_data_shared[NoConflictIndex(parent_idx)] += left_son_value;
}
}
__syncthreads();
/* Write the two scanned elements back to global memory. */
out_data[2 * thread_id_global] = in_data_shared[NoConflictIndex(2 * thread_id_local)];
out_data[2 * thread_id_global + 1] = in_data_shared[NoConflictIndex(2 * thread_id_local + 1)];
}
/*
 * Adds this block's scanned block-sum offset to every element the block
 * owns: data[i] += block_sums[blockIdx.x] for in-range i.
 *
 * BUG FIX: the original returned early for out-of-range threads BEFORE the
 * __syncthreads() barrier -- a divergent barrier is undefined behavior.
 * The range check now guards only the final write, so every thread of the
 * block reaches the barrier.
 */
__global__ void AddBlockSums(float * data, const float * block_sums, const size_t data_size) {
    __shared__ float this_block_sum;
    size_t thread_id_local = threadIdx.x;
    size_t thread_id_global = blockIdx.x * blockDim.x + thread_id_local;
    /* One thread stages the block's offset into shared memory. */
    if (thread_id_local == 0) {
        this_block_sum = block_sums[blockIdx.x];
    }
    __syncthreads();
    if (thread_id_global < data_size) {
        data[thread_id_global] += this_block_sum;
    }
}
/* In-place exclusive prefix scan of the per-block sums on the host.
 * Assumes num_blocks >= 1 (block_sums[0] is read unconditionally). */
__host__ void PrescanBlockSums(float * block_sums, const size_t num_blocks) {
    float running = block_sums[0];
    block_sums[0] = 0;
    for (size_t i = 1; i < num_blocks; ++i) {
        const float next = block_sums[i];
        block_sums[i] = running;
        running += next;
    }
}
/*
 * Exclusive prefix scan of data_size floats on the GPU, result in
 * partial_sums (host memory). Three stages: per-block Blelloch scan
 * (PrescanBlocks, 2*BLOCK_SIZE elements per block), host-side scan of the
 * block totals (PrescanBlockSums), then AddBlockSums to fold the offsets in.
 * NOTE(review): the hipMemcpy calls and kernel launches are not wrapped in
 * cudaCheckError, so transfer/launch failures go unnoticed; the shared_size
 * formula also looks over-provisioned for the disabled padding -- verify.
 */
void TotalPrescanGPU(const float * data, float * partial_sums, size_t data_size) {
float * d_data;
float * d_partial_sums;
float * d_block_sums;
float * block_sums;
/* Each block scans 2*BLOCK_SIZE elements; ceil-divide to cover the tail. */
size_t num_blocks = ((data_size + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE));
size_t shared_size = ((2 * BLOCK_SIZE + NUM_BANKS - 1) / NUM_BANKS + BLOCK_SIZE) * 2 * sizeof(float);
block_sums = (float *) malloc(num_blocks * sizeof(float));
cudaCheckError( hipMalloc(&d_data, data_size * sizeof(float)) );
cudaCheckError( hipMalloc(&d_partial_sums, data_size * sizeof(float)) );
cudaCheckError( hipMalloc(&d_block_sums, num_blocks * sizeof(float)) );
hipMemcpy(d_data, data, data_size * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( PrescanBlocks), dim3(num_blocks), dim3(BLOCK_SIZE), shared_size, 0, d_partial_sums, d_data, d_block_sums, data_size);
hipMemcpy(block_sums, d_block_sums, num_blocks * sizeof(float), hipMemcpyDeviceToHost);
/* Second-level scan of block totals happens on the host. */
PrescanBlockSums(block_sums, num_blocks);
hipMemcpy(d_block_sums, block_sums, num_blocks * sizeof(float), hipMemcpyHostToDevice);
/* 2*BLOCK_SIZE threads per block here: one thread per scanned element. */
hipLaunchKernelGGL(( AddBlockSums), dim3(num_blocks), dim3(2 * BLOCK_SIZE), 0, 0, d_partial_sums, d_block_sums, data_size);
hipMemcpy(partial_sums, d_partial_sums, data_size * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_block_sums);
hipFree(d_partial_sums);
hipFree(d_data);
free(block_sums);
}
/* Reference host implementation: exclusive prefix sum, i.e.
 * partial_sums[i] = data[0] + ... + data[i-1]. */
void TotalPrescanCPU(const float * data, float * partial_sums, size_t data_size) {
    float acc = 0.0;
    for (size_t i = 0; i < data_size; ++i) {
        partial_sums[i] = acc;
        acc += data[i];
    }
}
/*
 * Benchmark driver: times the GPU vs CPU exclusive prefix scan over
 * 2^logsize floats (logsize taken from argv[1]) and prints
 * "n gpu_mean gpu_std cpu_mean cpu_std" (times in milliseconds via clock()).
 */
int main(int argc, char * argv[]) {
    /* BUG FIX: the original dereferenced argv[1] without checking argc. */
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <log2_num_elements>" << std::endl;
        return 1;
    }
    size_t logsize = atoi(argv[1]);
    /* BUG FIX: cast before shifting -- "1 << logsize" overflows int for
     * logsize >= 31. */
    size_t num_elements = ((size_t)1 << logsize);
    float * data = (float *) malloc(num_elements * sizeof(float));
    float * partial_sums = (float *) malloc(num_elements * sizeof(float));
    for (size_t idx = 0; idx < num_elements; ++idx) {
        data[idx] = 1.0 * idx;
    }
    size_t num_runs = 100;
    float runtimes[100];
    /* --- GPU timing (wall clock; the original's unused, leaked hipEvent_t
     * pairs and commented-out event timing have been removed). --- */
    float gpu_mean = 0.0;
    float gpu_std = 0.0;
    for (size_t run = 0; run < num_runs; ++run) {
        const clock_t begin_time = clock();
        TotalPrescanGPU(data, partial_sums, num_elements);
        float milliseconds = float(clock() - begin_time) / 1000;
        runtimes[run] = milliseconds;
        gpu_mean += milliseconds / num_runs;
    }
    for (size_t run = 0; run < num_runs; ++run) {
        gpu_std += (gpu_mean - runtimes[run]) * (gpu_mean - runtimes[run]) / num_runs;
    }
    gpu_std = sqrt(gpu_std);
    /* --- CPU timing --- */
    float cpu_mean = 0.0;
    float cpu_std = 0.0;
    for (size_t run = 0; run < num_runs; ++run) {
        const clock_t begin_time = clock();
        TotalPrescanCPU(data, partial_sums, num_elements);
        float milliseconds = float(clock() - begin_time) / 1000;
        runtimes[run] = milliseconds;
        cpu_mean += milliseconds / num_runs;
    }
    for (size_t run = 0; run < num_runs; ++run) {
        cpu_std += (cpu_mean - runtimes[run]) * (cpu_mean - runtimes[run]) / num_runs;
    }
    cpu_std = sqrt(cpu_std);
    std::cout << num_elements << " " << gpu_mean << " " << gpu_std << " " << cpu_mean << " " << cpu_std << std::endl;
    free(data);
    free(partial_sums);
    return 0;
}
| 48ee9dd5fe753bbe1ec404ae21aaa48d9831a58c.cu | #include <iostream>
#include <stdio.h>
#include <ctime>
#define LOG_NUM_BANKS 5
#define NUM_BANKS 32
#define BLOCK_SIZE 64
#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
/* Prints file/line context for a failed CUDA API call and (by default)
 * terminates the process; used through the cudaCheckError() macro. */
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
/* Maps a logical shared-memory index to a physical one. The bank-conflict
 * padding (commented-out form) is currently disabled, so this is the
 * identity map. */
__device__ inline size_t NoConflictIndex(size_t index) {
return index;
// return index + (index >> LOG_NUM_BANKS);
}
/*
 * Blelloch work-efficient exclusive scan of one block's 2*blockDim.x
 * elements in dynamic shared memory; the block's total is written to
 * block_sums[blockIdx.x] for a later second-level scan.
 * Requires dynamic shared memory for at least 2*blockDim.x floats.
 *
 * NOTE(review): the early return below sits BEFORE the __syncthreads()
 * barriers -- a divergent barrier is undefined behavior whenever data_size
 * is not a multiple of the launch width. Also, since each thread loads TWO
 * elements, the guard should presumably test 2*thread_id_global against
 * data_size -- confirm against the launch in TotalPrescanGPU.
 */
__global__ void PrescanBlocks(float * out_data, const float * in_data, float * block_sums, const size_t data_size) {
// keeps all the in_data during processing
extern __shared__ float in_data_shared[];
size_t thread_id_local = threadIdx.x;
size_t offset = 1;
size_t thread_id_global = blockIdx.x * blockDim.x + thread_id_local;
if (thread_id_global >= data_size) {
return;
}
/* Each thread stages two input elements into shared memory. */
in_data_shared[NoConflictIndex(2 * thread_id_local)] = in_data[2 * thread_id_global];
in_data_shared[NoConflictIndex(2 * thread_id_local + 1)] = in_data[2 * thread_id_global + 1];
/* Up-sweep (reduce) phase: build partial sums up the binary tree. */
for (size_t level_size = 2 * blockDim.x >> 1; level_size > 0; level_size >>= 1) {
__syncthreads();
if (thread_id_local < level_size) {
size_t left_son_idx = offset * (2 * thread_id_local + 1) - 1;
size_t parent_idx = offset * (2 * thread_id_local + 2) - 1;
in_data_shared[NoConflictIndex(parent_idx)] += in_data_shared[NoConflictIndex(left_son_idx)];
}
offset *= 2;
}
/* Record the block total, then clear the root for the exclusive scan. */
if (thread_id_local == 0) {
block_sums[blockIdx.x] = in_data_shared[NoConflictIndex(blockDim.x * 2 - 1)];
in_data_shared[NoConflictIndex(blockDim.x * 2 - 1)] = 0;
}
/* Down-sweep phase: traverse back down, swapping and accumulating. */
for (size_t level_size = 1; level_size < 2 * blockDim.x; level_size *= 2) {
offset >>= 1;
__syncthreads();
if (thread_id_local < level_size) {
size_t left_son_idx = offset * (2 * thread_id_local + 1) - 1;
size_t parent_idx = offset * (2 * thread_id_local + 2) - 1;
float left_son_value = in_data_shared[NoConflictIndex(left_son_idx)];
in_data_shared[NoConflictIndex(left_son_idx)] = in_data_shared[NoConflictIndex(parent_idx)];
in_data_shared[NoConflictIndex(parent_idx)] += left_son_value;
}
}
__syncthreads();
/* Write the two scanned elements back to global memory. */
out_data[2 * thread_id_global] = in_data_shared[NoConflictIndex(2 * thread_id_local)];
out_data[2 * thread_id_global + 1] = in_data_shared[NoConflictIndex(2 * thread_id_local + 1)];
}
/*
 * Adds this block's scanned block-sum offset to every element the block
 * owns: data[i] += block_sums[blockIdx.x] for in-range i.
 *
 * BUG FIX: the original returned early for out-of-range threads BEFORE the
 * __syncthreads() barrier -- a divergent barrier is undefined behavior.
 * The range check now guards only the final write, so every thread of the
 * block reaches the barrier.
 */
__global__ void AddBlockSums(float * data, const float * block_sums, const size_t data_size) {
    __shared__ float this_block_sum;
    size_t thread_id_local = threadIdx.x;
    size_t thread_id_global = blockIdx.x * blockDim.x + thread_id_local;
    /* One thread stages the block's offset into shared memory. */
    if (thread_id_local == 0) {
        this_block_sum = block_sums[blockIdx.x];
    }
    __syncthreads();
    if (thread_id_global < data_size) {
        data[thread_id_global] += this_block_sum;
    }
}
/* Host-side exclusive prefix scan of the per-block sums, in place.
 * Assumes num_blocks >= 1 (block_sums[0] is read unconditionally). */
__host__ void PrescanBlockSums(float * block_sums, const size_t num_blocks) {
float sum = block_sums[0];
block_sums[0] = 0;
float keep;
for (size_t block_id = 1; block_id < num_blocks; ++block_id) {
keep = block_sums[block_id];
block_sums[block_id] = sum;
sum += keep;
}
}
/*
 * Exclusive prefix scan of data_size floats on the GPU, result in
 * partial_sums (host memory). Three stages: per-block Blelloch scan
 * (PrescanBlocks, 2*BLOCK_SIZE elements per block), host-side scan of the
 * block totals (PrescanBlockSums), then AddBlockSums to fold the offsets in.
 * NOTE(review): the cudaMemcpy calls and kernel launches are not wrapped in
 * cudaCheckError, so transfer/launch failures go unnoticed; the shared_size
 * formula also looks over-provisioned for the disabled padding -- verify.
 */
void TotalPrescanGPU(const float * data, float * partial_sums, size_t data_size) {
float * d_data;
float * d_partial_sums;
float * d_block_sums;
float * block_sums;
/* Each block scans 2*BLOCK_SIZE elements; ceil-divide to cover the tail. */
size_t num_blocks = ((data_size + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE));
size_t shared_size = ((2 * BLOCK_SIZE + NUM_BANKS - 1) / NUM_BANKS + BLOCK_SIZE) * 2 * sizeof(float);
block_sums = (float *) malloc(num_blocks * sizeof(float));
cudaCheckError( cudaMalloc(&d_data, data_size * sizeof(float)) );
cudaCheckError( cudaMalloc(&d_partial_sums, data_size * sizeof(float)) );
cudaCheckError( cudaMalloc(&d_block_sums, num_blocks * sizeof(float)) );
cudaMemcpy(d_data, data, data_size * sizeof(float), cudaMemcpyHostToDevice);
PrescanBlocks<<<num_blocks, BLOCK_SIZE, shared_size>>>(d_partial_sums, d_data, d_block_sums, data_size);
cudaMemcpy(block_sums, d_block_sums, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
/* Second-level scan of block totals happens on the host. */
PrescanBlockSums(block_sums, num_blocks);
cudaMemcpy(d_block_sums, block_sums, num_blocks * sizeof(float), cudaMemcpyHostToDevice);
/* 2*BLOCK_SIZE threads per block here: one thread per scanned element. */
AddBlockSums<<<num_blocks, 2 * BLOCK_SIZE>>>(d_partial_sums, d_block_sums, data_size);
cudaMemcpy(partial_sums, d_partial_sums, data_size * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_block_sums);
cudaFree(d_partial_sums);
cudaFree(d_data);
free(block_sums);
}
/* Reference host implementation: exclusive prefix sum, i.e.
 * partial_sums[i] = data[0] + ... + data[i-1]. */
void TotalPrescanCPU(const float * data, float * partial_sums, size_t data_size) {
float sum = 0.0;
for (size_t idx = 0; idx < data_size; ++idx) {
partial_sums[idx] = sum;
sum += data[idx];
}
}
/*
 * Benchmark driver: times the GPU vs CPU exclusive prefix scan over
 * 2^logsize floats (logsize taken from argv[1]) and prints
 * "n gpu_mean gpu_std cpu_mean cpu_std" (times in milliseconds via clock()).
 */
int main(int argc, char * argv[]) {
    /* BUG FIX: the original dereferenced argv[1] without checking argc. */
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <log2_num_elements>" << std::endl;
        return 1;
    }
    size_t logsize = atoi(argv[1]);
    /* BUG FIX: cast before shifting -- "1 << logsize" overflows int for
     * logsize >= 31. */
    size_t num_elements = ((size_t)1 << logsize);
    float * data = (float *) malloc(num_elements * sizeof(float));
    float * partial_sums = (float *) malloc(num_elements * sizeof(float));
    for (size_t idx = 0; idx < num_elements; ++idx) {
        data[idx] = 1.0 * idx;
    }
    size_t num_runs = 100;
    float runtimes[100];
    /* --- GPU timing (wall clock; the original's unused, leaked cudaEvent_t
     * pairs and commented-out event timing have been removed). --- */
    float gpu_mean = 0.0;
    float gpu_std = 0.0;
    for (size_t run = 0; run < num_runs; ++run) {
        const clock_t begin_time = clock();
        TotalPrescanGPU(data, partial_sums, num_elements);
        float milliseconds = float(clock() - begin_time) / 1000;
        runtimes[run] = milliseconds;
        gpu_mean += milliseconds / num_runs;
    }
    for (size_t run = 0; run < num_runs; ++run) {
        gpu_std += (gpu_mean - runtimes[run]) * (gpu_mean - runtimes[run]) / num_runs;
    }
    gpu_std = sqrt(gpu_std);
    /* --- CPU timing --- */
    float cpu_mean = 0.0;
    float cpu_std = 0.0;
    for (size_t run = 0; run < num_runs; ++run) {
        const clock_t begin_time = clock();
        TotalPrescanCPU(data, partial_sums, num_elements);
        float milliseconds = float(clock() - begin_time) / 1000;
        runtimes[run] = milliseconds;
        cpu_mean += milliseconds / num_runs;
    }
    for (size_t run = 0; run < num_runs; ++run) {
        cpu_std += (cpu_mean - runtimes[run]) * (cpu_mean - runtimes[run]) / num_runs;
    }
    cpu_std = sqrt(cpu_std);
    std::cout << num_elements << " " << gpu_mean << " " << gpu_std << " " << cpu_mean << " " << cpu_std << std::endl;
    free(data);
    free(partial_sums);
    return 0;
}
|
ae86e25dd964d3176859f6c2897920579bbdce23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/Matrix.cuh"
#include <vector>
#include <algorithm> // std::max
/* ---------------------------------------------------------------
matMul
Parameters:
a - double ptr representing matrix A in row-major form
b - double ptr representing matrix B in row-major form
c - double ptr where AB will be stored in row-major form
m - rows in A / C
n - cols in A / rows in B
k - cols in B / C
Multiplies the matrices stored in row-major form in a and b, then stores
the output in c
Could be optimized much further with shared memory
--------------------------------------------------------------- */
__global__ void matMul(double *a, double *b, double *c, int m, int n, int k)
{
    /* One thread per output element C(row, col). */
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    /* Threads outside the m x k output do nothing (the original's second
     * bounds test duplicated this guard and is folded into it). */
    if (row >= m || col >= k)
        return;
    double acc = 0.0;
    for (int i = 0; i < n; ++i)
        acc += a[row * n + i] * b[i * k + col];
    c[row * k + col] = acc;
} // end matMul
/* ---------------------------------------------------------------
matMulGPU
Parameters:
a - vector representing first matrix
b - vector representing second matrix
m - rows in a
n - cols in a / rows in b
k - cols in b
Calls cuda kernel matMul on a.data() and b.data()
Returns:
c - vector representing AB (has dim m x k)
--------------------------------------------------------------- */
/* Allocates device buffers, copies A (m x n) and B (n x k) to the GPU,
 * launches matMul over a 2D grid, and returns C = AB (m x k) as a host
 * vector. NOTE(review): the hipMalloc/hipMemcpy return codes are unchecked. */
std::vector<double> matMulGPU(std::vector<double>& a, std::vector<double>& b, int m, int n, int k)
{
double *d_a, *d_b, *d_c;
std::vector<double> c(m * k);
/* Square 32x32 blocks unless the output is smaller in both dimensions. */
int BLOCKSIZE = m >= 32 || k >= 32 ? 32 : ::max(m, k);
hipMalloc((void **) &d_a, m * n * sizeof(double));
hipMalloc((void **) &d_b, n * k * sizeof(double));
hipMalloc((void **) &d_c, m * k * sizeof(double));
hipMemcpy(d_a, a.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_b, b.data(), n * k * sizeof(double), hipMemcpyHostToDevice);
/* Ceil-divide so partial tiles at the edges are still covered. */
dim3 GRID((k + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
hipLaunchKernelGGL(( matMul), dim3(GRID), dim3(BLOCK), 0, 0, d_a, d_b, d_c, m, n, k);
hipDeviceSynchronize();
hipMemcpy(c.data(), d_c, m * k * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return c;
} // end matMulGPU
/* ---------------------------------------------------------------
scalarMult
Parameters:
a - double ptr representing matrix A in row-major form
c - scalar to multiply a by
len - int representing length of row-major representation of A
Performs scalar multiplication and stores result in a
--------------------------------------------------------------- */
__global__ void scalarMult(double *a, double c, int len)
{
    // Linearize the 2D grid of 2D blocks into one global index.
    const int rowStride = gridDim.x * blockDim.x;
    const int g = rowStride * (blockDim.y * blockIdx.y + threadIdx.y)
                + blockDim.x * blockIdx.x + threadIdx.x;
    if (g < len)
        a[g] *= c; // scale in place
} // end scalarMult
/* ---------------------------------------------------------------
scalarMultGPU
Parameters:
a - vector representing matrix A
c - scalar to multiply a by
m - rows in A
n - cols in A
Calls cuda kernel scalarMult on a.data()
Returns:
B - vector representing cA (has dim m x n)
--------------------------------------------------------------- */
/* Host wrapper: copies A (m x n) to the device, scales every element by c
   with the scalarMult kernel, and returns the scaled matrix as a host vector. */
std::vector<double> scalarMultGPU(std::vector<double>& a, double c, int m, int n)
{
    double *d_a;
    std::vector<double> b(m * n);
    // FIX: hipify rewrote std::max as ::max, which is not declared anywhere;
    // use std::max from <algorithm> (already included at the top of the file).
    int BLOCKSIZE = m >= 32 || n >= 32 ? 32 : std::max(m, n);
    hipMalloc((void **) &d_a, m * n * sizeof(double));
    hipMemcpy(d_a, a.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
    dim3 GRID((n + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
    hipLaunchKernelGGL(( scalarMult), dim3(GRID), dim3(BLOCK), 0, 0, d_a, c, m * n);
    hipDeviceSynchronize();
    // The kernel works in place on d_a; copy the result into a fresh vector.
    hipMemcpy(b.data(), d_a, m * n * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_a);
    return b;
} // end scalarMultGPU
/* ---------------------------------------------------------------
hadamard
Parameters:
a - double ptr representing matrix A in row-major form
b - double ptr representing matrix B in row-major form
c - double ptr where A o B will be stored in row-major form
len - the length of row-major form of A, B, and C
Performs Hadamard operation (element-wise mult) and stores result in c
--------------------------------------------------------------- */
__global__ void hadamard(double *a, double *b, double *c, int len)
{
    // Linear global index across the 2D grid of 2D blocks.
    int g_idx = gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y)
        + blockIdx.x * blockDim.x + threadIdx.x;
    if (g_idx >= len)
        return;
    // BUG FIX: the Hadamard product is element-wise MULTIPLICATION; the
    // original kernel added the elements, duplicating matAdd.
    c[g_idx] = a[g_idx] * b[g_idx];
} // end hadamard
/* ---------------------------------------------------------------
hadamardGPU
Parameters:
a - vector representing matrix A
b - vector representing matrix B
m - rows in A / B
n - cols in A / B
Calls cuda kernel hadamard on a.data() and b.data()
Returns:
c - vector representing A o B (has dim m x n)
--------------------------------------------------------------- */
/* Host wrapper: copies A and B (both m x n) to the device, launches the
   hadamard kernel, and returns A o B as a host vector. */
std::vector<double> hadamardGPU(std::vector<double>& a, std::vector<double>& b, int m, int n)
{
    double *d_a, *d_b, *d_c;
    std::vector<double> c(m * n);
    // FIX: hipify rewrote std::max as ::max, which is not declared anywhere;
    // use std::max from <algorithm> (already included at the top of the file).
    int BLOCKSIZE = m >= 32 || n >= 32 ? 32 : std::max(m, n);
    hipMalloc((void **) &d_a, m * n * sizeof(double));
    hipMalloc((void **) &d_b, m * n * sizeof(double));
    hipMalloc((void **) &d_c, m * n * sizeof(double));
    hipMemcpy(d_a, a.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
    dim3 GRID((n + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
    hipLaunchKernelGGL(( hadamard), dim3(GRID), dim3(BLOCK), 0, 0, d_a, d_b, d_c, m * n);
    hipDeviceSynchronize();
    hipMemcpy(c.data(), d_c, m * n * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return c;
} // end hadamardGPU
/* ---------------------------------------------------------------
matAdd
Parameters:
a - double ptr representing matrix A in row-major form
b - double ptr representing matrix B in row-major form
c - double ptr where A + B will be stored in row-major form
len - the length of row-major form of A, B, and C
Performs A + B and stores result in c
--------------------------------------------------------------- */
__global__ void matAdd(double *a, double *b, double *c, int len)
{
    // Linear global index across the 2D grid of 2D blocks.
    const int rowStride = gridDim.x * blockDim.x;
    const int g = rowStride * (blockIdx.y * blockDim.y + threadIdx.y)
                + blockIdx.x * blockDim.x + threadIdx.x;
    if (g < len)
        c[g] = a[g] + b[g]; // element-wise sum
} // end matAdd
/* ---------------------------------------------------------------
matAddGPU
Parameters:
a - vector representing matrix A
b - vector representing matrix B
m - rows in A / B
n - cols in A / B
Calls cuda kernel matAdd on a.data() and b.data()
Returns:
c - vector representing A + B (has dim m x n)
--------------------------------------------------------------- */
/* Host wrapper: copies A and B (both m x n) to the device, launches the
   matAdd kernel, and returns A + B as a host vector. */
std::vector<double> matAddGPU(std::vector<double>& a, std::vector<double>& b, int m, int n)
{
    double *d_a, *d_b, *d_c;
    std::vector<double> c(m * n);
    // FIX: hipify rewrote std::max as ::max, which is not declared anywhere;
    // use std::max from <algorithm> (already included at the top of the file).
    int BLOCKSIZE = m >= 32 || n >= 32 ? 32 : std::max(m, n);
    hipMalloc((void **) &d_a, m * n * sizeof(double));
    hipMalloc((void **) &d_b, m * n * sizeof(double));
    hipMalloc((void **) &d_c, m * n * sizeof(double));
    hipMemcpy(d_a, a.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
    dim3 GRID((n + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
    hipLaunchKernelGGL(( matAdd), dim3(GRID), dim3(BLOCK), 0, 0, d_a, d_b, d_c, m * n);
    hipDeviceSynchronize();
    hipMemcpy(c.data(), d_c, m * n * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return c;
} // end matAddGPU
/* ---------------------------------------------------------------
matReciprocal
Parameters:
a - double ptr representing matrix A in row-major form
len - length of vector representing A
raises each value in A to the -1 power
--------------------------------------------------------------- */
__global__ void matReciprocal(double *a, int len)
{
    int g_idx = gridDim.x * blockDim.x * (blockDim.y * blockIdx.y + threadIdx.y)
        + blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the guard was inverted ("len >= g_idx"), which made every
    // in-range thread return immediately and let out-of-range threads write
    // past the end of the buffer.
    if (g_idx >= len)
        return;
    a[g_idx] = 1.0 / a[g_idx];
} // end matReciprocal
/* ---------------------------------------------------------------
matReciprocalGPU
Parameters:
a - vector representing matrix A
m - rows in matrix A
n - cols in matrix A
Calls cuda kernel matReciprocal on a.data()
Returns:
c - vector representing reciprocal A
--------------------------------------------------------------- */
/* Host wrapper: copies A (m x n) to the device, replaces each element with
   its reciprocal via the matReciprocal kernel, and returns the result. */
std::vector<double> matReciprocalGPU(std::vector<double>& a, int m, int n)
{
    double *d_a;
    std::vector<double> c(m * n);
    // FIX: hipify rewrote std::max as ::max, which is not declared anywhere;
    // use std::max from <algorithm> (already included at the top of the file).
    int BLOCKSIZE = m >= 32 || n >= 32 ? 32 : std::max(m, n);
    hipMalloc((void **) &d_a, m * n * sizeof(double));
    hipMemcpy(d_a, a.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
    dim3 GRID((n + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
    hipLaunchKernelGGL(( matReciprocal), dim3(GRID), dim3(BLOCK), 0, 0, d_a, m * n);
    hipDeviceSynchronize();
    // The kernel works in place on d_a; copy the result into a fresh vector.
    hipMemcpy(c.data(), d_a, m * n * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_a);
    return c;
} // end matReciprocalGPU
/* ---------------------------------------------------------------
matSqrt
Parameters:
a - double ptr representing matrix A in row-major form
len - length of vector representing A
Square root of each value in A
--------------------------------------------------------------- */
__global__ void matSqrt(double *a, int len)
{
    int g_idx = gridDim.x * blockDim.x * (blockDim.y * blockIdx.y + threadIdx.y)
        + blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the guard was inverted ("len >= g_idx"), which made every
    // in-range thread return immediately and let out-of-range threads write
    // past the end of the buffer.
    if (g_idx >= len)
        return;
    a[g_idx] = sqrt(a[g_idx]);
} // end matSqrt
/* ---------------------------------------------------------------
matSqrtGPU
Parameters:
a - vector representing matrix A
m - rows in matrix A
n - cols in matrix A
Calls cuda kernel matSqrt on a.data()
Returns:
c - vector representing sqrt A
--------------------------------------------------------------- */
/* Host wrapper: copies A (m x n) to the device, takes the square root of each
   element via the matSqrt kernel, and returns the result as a host vector. */
std::vector<double> matSqrtGPU(std::vector<double>& a, int m, int n)
{
    double *d_a;
    std::vector<double> c(m * n);
    // FIX: hipify rewrote std::max as ::max, which is not declared anywhere;
    // use std::max from <algorithm> (already included at the top of the file).
    int BLOCKSIZE = m >= 32 || n >= 32 ? 32 : std::max(m, n);
    hipMalloc((void **) &d_a, m * n * sizeof(double));
    hipMemcpy(d_a, a.data(), m * n * sizeof(double), hipMemcpyHostToDevice);
    dim3 GRID((n + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
    hipLaunchKernelGGL(( matSqrt), dim3(GRID), dim3(BLOCK), 0, 0, d_a, m * n);
    hipDeviceSynchronize();
    // The kernel works in place on d_a; copy the result into a fresh vector.
    hipMemcpy(c.data(), d_a, m * n * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_a);
    return c;
} // end matSqrtGPU
/* ---------------------------------------------------------------
matTrans
Parameters:
a - double ptr representing matrix A in row-major form
aT - double ptr representing matrix AT in row-major form
m - rows in A / cols in AT
n - cols in A / rows in AT
Transposes matrix A
--------------------------------------------------------------- */
__global__ void matTrans(double *a, double *aT, int m, int n)
{
    // Each thread moves a single element: aT[col][row] = a[row][col].
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < m && col < n)
        aT[col * m + row] = a[row * n + col];
} // end matTrans
/* ---------------------------------------------------------------
matTransGPU
Parameters:
a - vector representing matrix A
m - rows in A / cols in AT
n - cols in A / rows in AT
Calls cuda kernel matTrans on a.data()
Returns:
aT - vector representing AT
--------------------------------------------------------------- */
/* Host wrapper: copies A (m x n) to the device, launches the matTrans
   kernel, and returns A^T (n x m, row-major) as a host vector. */
std::vector<double> matTransGPU(std::vector<double>& a, int m, int n)
{
    double *d_a, *d_aT;
    std::vector<double> aT(m * n);
    // FIX: hipify rewrote std::max as ::max, which is not declared anywhere;
    // use std::max from <algorithm> (already included at the top of the file).
    int BLOCKSIZE = m >= 32 || n >= 32 ? 32 : std::max(m, n);
    size_t SIZE = m * n * sizeof(double);
    hipMalloc((void **) &d_a, SIZE);
    hipMalloc((void **) &d_aT, SIZE);
    hipMemcpy(d_a, a.data(), SIZE, hipMemcpyHostToDevice);
    dim3 GRID((n + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
    hipLaunchKernelGGL(( matTrans), dim3(GRID), dim3(BLOCK), 0, 0, d_a, d_aT, m, n);
    hipDeviceSynchronize();
    hipMemcpy(aT.data(), d_aT, SIZE, hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipFree(d_aT);
    return aT;
} // end matTransGPU
| ae86e25dd964d3176859f6c2897920579bbdce23.cu | #include "../include/Matrix.cuh"
#include <vector>
#include <algorithm> // std::max
/* ---------------------------------------------------------------
matMul
Parameters:
a - double ptr representing matrix A in row-major form
b - double ptr representing matrix B in row-major form
c - double ptr where AB will be stored in row-major form
m - rows in A / C
n - cols in A / rows in B
k - cols in B / C
Multiplies the matrices stored in row-major form in a and b, then stores
the output in c
Could be optimized much further with shared memory
--------------------------------------------------------------- */
__global__ void matMul(double *a, double *b, double *c, int m, int n, int k)
{
    // Each thread produces one entry of the m x k row-major product C = A * B.
    const int y = blockIdx.y * blockDim.y + threadIdx.y; // row in C
    const int x = blockIdx.x * blockDim.x + threadIdx.x; // col in C
    if (y < m && x < k)
    {
        // Dot product of row y of A with column x of B.
        double dot = 0.0;
        for (int j = 0; j < n; ++j)
            dot += a[y * n + j] * b[j * k + x];
        c[y * k + x] = dot;
    }
} // end matMul
/* ---------------------------------------------------------------
matMulGPU
Parameters:
a - vector representing first matrix
b - vector representing second matrix
m - rows in a
n - cols in a / rows in b
k - cols in b
Calls cuda kernel matMul on a.data() and b.data()
Returns:
c - vector representing AB (has dim m x k)
--------------------------------------------------------------- */
// Host wrapper: ships A (m x n) and B (n x k) to the device, runs matMul,
// and hands back C = AB (m x k) as a host vector.
std::vector<double> matMulGPU(std::vector<double>& a, std::vector<double>& b, int m, int n, int k)
{
    // 32x32 thread blocks for large matrices, otherwise a block sized by the
    // larger of the two output dimensions.
    const int bsz = (m >= 32 || k >= 32) ? 32 : std::max(m, k);
    double *d_a = nullptr, *d_b = nullptr, *d_c = nullptr;
    cudaMalloc((void **) &d_a, m * n * sizeof(double));
    cudaMalloc((void **) &d_b, n * k * sizeof(double));
    cudaMalloc((void **) &d_c, m * k * sizeof(double));
    cudaMemcpy(d_a, a.data(), m * n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b.data(), n * k * sizeof(double), cudaMemcpyHostToDevice);
    // Ceil-divide so ragged edges of C are still covered by a partial block.
    const dim3 block(bsz, bsz);
    const dim3 grid((k + bsz - 1) / bsz, (m + bsz - 1) / bsz);
    matMul<<<grid, block, 0>>>(d_a, d_b, d_c, m, n, k);
    cudaDeviceSynchronize();
    std::vector<double> c(m * k);
    cudaMemcpy(c.data(), d_c, m * k * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return c;
} // end matMulGPU
/* ---------------------------------------------------------------
scalarMult
Parameters:
a - double ptr representing matrix A in row-major form
c - scalar to multiply a by
len - int representing length of row-major representation of A
Performs scalar multiplication and stores result in a
--------------------------------------------------------------- */
__global__ void scalarMult(double *a, double c, int len)
{
    // Global linear thread id over a 2D grid of 2D blocks.
    const int perRow = gridDim.x * blockDim.x;
    const int idx = perRow * (blockDim.y * blockIdx.y + threadIdx.y)
                  + blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len)
        return;
    a[idx] = a[idx] * c; // scale in place
} // end scalarMult
/* ---------------------------------------------------------------
scalarMultGPU
Parameters:
a - vector representing matrix A
c - scalar to multiply a by
m - rows in A
n - cols in A
Calls cuda kernel scalarMult on a.data()
Returns:
B - vector representing cA (has dim m x n)
--------------------------------------------------------------- */
// Host wrapper: ships A (m x n) to the device, scales it by c in place with
// the scalarMult kernel, and returns the scaled matrix as a host vector.
std::vector<double> scalarMultGPU(std::vector<double>& a, double c, int m, int n)
{
    const int bsz = (m >= 32 || n >= 32) ? 32 : std::max(m, n);
    const size_t nbytes = m * n * sizeof(double);
    double *d_a = nullptr;
    cudaMalloc((void **) &d_a, nbytes);
    cudaMemcpy(d_a, a.data(), nbytes, cudaMemcpyHostToDevice);
    const dim3 block(bsz, bsz);
    const dim3 grid((n + bsz - 1) / bsz, (m + bsz - 1) / bsz);
    scalarMult<<<grid, block, 0>>>(d_a, c, m * n);
    cudaDeviceSynchronize();
    // The kernel works in place; copy the device result into a fresh vector.
    std::vector<double> b(m * n);
    cudaMemcpy(b.data(), d_a, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    return b;
} // end scalarMultGPU
/* ---------------------------------------------------------------
hadamard
Parameters:
a - double ptr representing matrix A in row-major form
b - double ptr representing matrix B in row-major form
c - double ptr where A o B will be stored in row-major form
len - the length of row-major form of A, B, and C
Performs Hadamard operation (element-wise mult) and stores result in c
--------------------------------------------------------------- */
__global__ void hadamard(double *a, double *b, double *c, int len)
{
    // Linear global index across the 2D grid of 2D blocks.
    int g_idx = gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y)
        + blockIdx.x * blockDim.x + threadIdx.x;
    if (g_idx >= len)
        return;
    // BUG FIX: the Hadamard product is element-wise MULTIPLICATION; the
    // original kernel added the elements, duplicating matAdd.
    c[g_idx] = a[g_idx] * b[g_idx];
} // end hadamard
/* ---------------------------------------------------------------
hadamardGPU
Parameters:
a - vector representing matrix A
b - vector representing matrix B
m - rows in A / B
n - cols in A / B
Calls cuda kernel hadamard on a.data() and b.data()
Returns:
c - vector representing A o B (has dim m x n)
--------------------------------------------------------------- */
// Host wrapper: ships A and B (both m x n) to the device, launches the
// hadamard kernel, and returns A o B as a host vector.
std::vector<double> hadamardGPU(std::vector<double>& a, std::vector<double>& b, int m, int n)
{
    const int bsz = (m >= 32 || n >= 32) ? 32 : std::max(m, n);
    const size_t nbytes = m * n * sizeof(double);
    double *d_a = nullptr, *d_b = nullptr, *d_c = nullptr;
    cudaMalloc((void **) &d_a, nbytes);
    cudaMalloc((void **) &d_b, nbytes);
    cudaMalloc((void **) &d_c, nbytes);
    cudaMemcpy(d_a, a.data(), nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b.data(), nbytes, cudaMemcpyHostToDevice);
    const dim3 block(bsz, bsz);
    const dim3 grid((n + bsz - 1) / bsz, (m + bsz - 1) / bsz);
    hadamard<<<grid, block, 0>>>(d_a, d_b, d_c, m * n);
    cudaDeviceSynchronize();
    std::vector<double> c(m * n);
    cudaMemcpy(c.data(), d_c, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return c;
} // end hadamardGPU
/* ---------------------------------------------------------------
matAdd
Parameters:
a - double ptr representing matrix A in row-major form
b - double ptr representing matrix B in row-major form
c - double ptr where A + B will be stored in row-major form
len - the length of row-major form of A, B, and C
Performs A + B and stores result in c
--------------------------------------------------------------- */
__global__ void matAdd(double *a, double *b, double *c, int len)
{
    // Global linear thread id over a 2D grid of 2D blocks.
    const int perRow = gridDim.x * blockDim.x;
    const int idx = perRow * (blockIdx.y * blockDim.y + threadIdx.y)
                  + blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= len)
        return;
    c[idx] = a[idx] + b[idx]; // element-wise addition
} // end matAdd
/* ---------------------------------------------------------------
matAddGPU
Parameters:
a - vector representing matrix A
b - vector representing matrix B
m - rows in A / B
n - cols in A / B
Calls cuda kernel matAdd on a.data() and b.data()
Returns:
c - vector representing A + B (has dim m x n)
--------------------------------------------------------------- */
// Host wrapper: ships A and B (both m x n) to the device, launches the
// matAdd kernel, and returns A + B as a host vector.
std::vector<double> matAddGPU(std::vector<double>& a, std::vector<double>& b, int m, int n)
{
    const int bsz = (m >= 32 || n >= 32) ? 32 : std::max(m, n);
    const size_t nbytes = m * n * sizeof(double);
    double *d_a = nullptr, *d_b = nullptr, *d_c = nullptr;
    cudaMalloc((void **) &d_a, nbytes);
    cudaMalloc((void **) &d_b, nbytes);
    cudaMalloc((void **) &d_c, nbytes);
    cudaMemcpy(d_a, a.data(), nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b.data(), nbytes, cudaMemcpyHostToDevice);
    const dim3 block(bsz, bsz);
    const dim3 grid((n + bsz - 1) / bsz, (m + bsz - 1) / bsz);
    matAdd<<<grid, block, 0>>>(d_a, d_b, d_c, m * n);
    cudaDeviceSynchronize();
    std::vector<double> c(m * n);
    cudaMemcpy(c.data(), d_c, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return c;
} // end matAddGPU
/* ---------------------------------------------------------------
matReciprocal
Parameters:
a - double ptr representing matrix A in row-major form
len - length of vector representing A
raises each value in A to the -1 power
--------------------------------------------------------------- */
__global__ void matReciprocal(double *a, int len)
{
    int g_idx = gridDim.x * blockDim.x * (blockDim.y * blockIdx.y + threadIdx.y)
        + blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the guard was inverted ("len >= g_idx"), which made every
    // in-range thread return immediately and let out-of-range threads write
    // past the end of the buffer.
    if (g_idx >= len)
        return;
    a[g_idx] = 1.0 / a[g_idx];
} // end matReciprocal
/* ---------------------------------------------------------------
matReciprocalGPU
Parameters:
a - vector representing matrix A
m - rows in matrix A
n - cols in matrix A
Calls cuda kernel matReciprocal on a.data()
Returns:
c - vector representing reciprocal A
--------------------------------------------------------------- */
std::vector<double> matReciprocalGPU(std::vector<double>& a, int m, int n)
{
double *d_a;
std::vector<double> c(m * n);
// 32x32 block for large matrices, otherwise sized by the larger dimension.
int BLOCKSIZE = m >= 32 || n >= 32 ? 32 : std::max(m, n);
cudaMalloc((void **) &d_a, m * n * sizeof(double));
cudaMemcpy(d_a, a.data(), m * n * sizeof(double), cudaMemcpyHostToDevice);
// Ceil-divide so partial blocks cover the ragged edges of the matrix.
dim3 GRID((n + BLOCKSIZE - 1) / BLOCKSIZE, (m + BLOCKSIZE - 1) / BLOCKSIZE);
dim3 BLOCK(BLOCKSIZE, BLOCKSIZE);
matReciprocal<<<GRID, BLOCK, 0>>>(d_a, m * n);
cudaDeviceSynchronize();
// The kernel works in place on d_a; copy the result into a fresh vector.
cudaMemcpy(c.data(), d_a, m * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_a);
return c;
} // end matReciprocalGPU
/* ---------------------------------------------------------------
matSqrt
Parameters:
a - double ptr representing matrix A in row-major form
len - length of vector representing A
Square root of each value in A
--------------------------------------------------------------- */
__global__ void matSqrt(double *a, int len)
{
    int g_idx = gridDim.x * blockDim.x * (blockDim.y * blockIdx.y + threadIdx.y)
        + blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the guard was inverted ("len >= g_idx"), which made every
    // in-range thread return immediately and let out-of-range threads write
    // past the end of the buffer.
    if (g_idx >= len)
        return;
    a[g_idx] = sqrt(a[g_idx]);
} // end matSqrt
/* ---------------------------------------------------------------
matSqrtGPU
Parameters:
a - vector representing matrix A
m - rows in matrix A
n - cols in matrix A
Calls cuda kernel matSqrt on a.data()
Returns:
c - vector representing sqrt A
--------------------------------------------------------------- */
// Host wrapper: ships A (m x n) to the device, takes the square root of each
// element in place via matSqrt, and returns the result as a host vector.
std::vector<double> matSqrtGPU(std::vector<double>& a, int m, int n)
{
    const int bsz = (m >= 32 || n >= 32) ? 32 : std::max(m, n);
    const size_t nbytes = m * n * sizeof(double);
    double *d_a = nullptr;
    cudaMalloc((void **) &d_a, nbytes);
    cudaMemcpy(d_a, a.data(), nbytes, cudaMemcpyHostToDevice);
    const dim3 block(bsz, bsz);
    const dim3 grid((n + bsz - 1) / bsz, (m + bsz - 1) / bsz);
    matSqrt<<<grid, block, 0>>>(d_a, m * n);
    cudaDeviceSynchronize();
    // The kernel works in place; copy the device result into a fresh vector.
    std::vector<double> c(m * n);
    cudaMemcpy(c.data(), d_a, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    return c;
} // end matSqrtGPU
/* ---------------------------------------------------------------
matTrans
Parameters:
a - double ptr representing matrix A in row-major form
aT - double ptr representing matrix AT in row-major form
m - rows in A / cols in AT
n - cols in A / rows in AT
Transposes matrix A
--------------------------------------------------------------- */
__global__ void matTrans(double *a, double *aT, int m, int n)
{
    // One element per thread: aT (n x m, row-major) gets a[row][col].
    const int x = blockIdx.x * blockDim.x + threadIdx.x; // column of A
    const int y = blockIdx.y * blockDim.y + threadIdx.y; // row of A
    if (x >= n || y >= m)
        return;
    aT[x * m + y] = a[y * n + x];
} // end matTrans
/* ---------------------------------------------------------------
matTransGPU
Parameters:
a - vector representing matrix A
m - rows in A / cols in AT
n - cols in A / rows in AT
Calls cuda kernel matTrans on a.data()
Returns:
aT - vector representing AT
--------------------------------------------------------------- */
// Host wrapper: ships A (m x n) to the device, launches the matTrans kernel,
// and returns A^T (n x m, row-major) as a host vector.
std::vector<double> matTransGPU(std::vector<double>& a, int m, int n)
{
    const int bsz = (m >= 32 || n >= 32) ? 32 : std::max(m, n);
    const size_t nbytes = m * n * sizeof(double);
    double *d_a = nullptr, *d_aT = nullptr;
    cudaMalloc((void **) &d_a, nbytes);
    cudaMalloc((void **) &d_aT, nbytes);
    cudaMemcpy(d_a, a.data(), nbytes, cudaMemcpyHostToDevice);
    const dim3 block(bsz, bsz);
    const dim3 grid((n + bsz - 1) / bsz, (m + bsz - 1) / bsz);
    matTrans<<<grid, block, 0>>>(d_a, d_aT, m, n);
    cudaDeviceSynchronize();
    std::vector<double> aT(m * n);
    cudaMemcpy(aT.data(), d_aT, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_aT);
    return aT;
} // end matTransGPU
|
f76e435546a0e55f52db9cae98b601923dc9e830.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>, created on 25.01.2019
//
#include <loops/special_kernels.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// kernel to swap two NDArrays vals as linear sequences
// input - theSecondBuffer/Shape from input NDArray
// output - theFirstBuffer/Shape from input NDArray
template <typename T>
static SD_KERNEL void swapUnsafeKernel(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape) {
// Global linear thread id and total thread count, used for a grid-stride loop.
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
// Shape metadata is the same for every thread in the block, so thread 0
// computes it once and publishes it through shared memory.
__shared__ sd::LongType resultLength, xEws, yEws;
__shared__ bool sameOffsets, sameOrders;
__shared__ T* input;
__shared__ T* output;
if (0 == threadIdx.x) {
resultLength = shape::length(theFirstShape);
input = reinterpret_cast<T*>(theSecondBuffer);
output = reinterpret_cast<T*>(theFirstBuffer);
sameOffsets = shape::haveSameShapeAndStrides(theFirstShape, theSecondShape);
sameOrders = shape::order(theFirstShape) == shape::order(theSecondShape);
xEws = shape::elementWiseStride(theFirstShape);
yEws = shape::elementWiseStride(theSecondShape);
}
// Barrier so every thread sees the shared values written by thread 0.
__syncthreads();
for (int i = tid; i < resultLength; i += totalThreads) {
// Fast path: same ordering and positive element-wise strides on both arrays.
if (sameOrders && xEws > 0 && yEws > 0) {
sd::math::sd_swap(output[i * xEws], input[i * yEws]);
} else if (sameOffsets) {
// Same shape and strides: a single offset computation serves both arrays.
const auto offset = shape::getIndexOffset(i, theFirstShape);
sd::math::sd_swap(output[offset], input[offset]);
} else {
// General case: each array needs its own index-to-offset mapping.
const auto xOffset = shape::getIndexOffset(i, theFirstShape);
const auto yOffset = shape::getIndexOffset(i, theSecondShape);
sd::math::sd_swap(output[xOffset], input[yOffset]);
}
}
}
BUILD_SINGLE_TEMPLATE(template SD_KERNEL void swapUnsafeKernel,
(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape),
SD_COMMON_TYPES);
template <typename T>
void templatedSwapUnsafe(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape, hipStream_t* theStream) {
// Host launcher: fetches the configured launch dimensions for "swap_unsafe"
// (dims.y = grid, dims.x = block, dims.z = shared-memory bytes, per the launch
// below) and runs the swap kernel on the caller-supplied stream.
dim3 launchDims = getLaunchDims("swap_unsafe");
hipLaunchKernelGGL(( swapUnsafeKernel<T>), dim3(launchDims.y),dim3(launchDims.x), launchDims.z, *theStream, theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape);
}
BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe,
(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape, hipStream_t* theStream),
SD_COMMON_TYPES);
} // namespace sd
| f76e435546a0e55f52db9cae98b601923dc9e830.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>, created on 25.01.2019
//
#include <loops/special_kernels.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// kernel to swap two NDArrays vals as linear sequences
// input - theSecondBuffer/Shape from input NDArray
// output - theFirstBuffer/Shape from input NDArray
template <typename T>
static SD_KERNEL void swapUnsafeKernel(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape) {
// Global linear thread id and total thread count, used for a grid-stride loop.
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
// Shape metadata is the same for every thread in the block, so thread 0
// computes it once and publishes it through shared memory.
__shared__ sd::LongType resultLength, xEws, yEws;
__shared__ bool sameOffsets, sameOrders;
__shared__ T* input;
__shared__ T* output;
if (0 == threadIdx.x) {
resultLength = shape::length(theFirstShape);
input = reinterpret_cast<T*>(theSecondBuffer);
output = reinterpret_cast<T*>(theFirstBuffer);
sameOffsets = shape::haveSameShapeAndStrides(theFirstShape, theSecondShape);
sameOrders = shape::order(theFirstShape) == shape::order(theSecondShape);
xEws = shape::elementWiseStride(theFirstShape);
yEws = shape::elementWiseStride(theSecondShape);
}
// Barrier so every thread sees the shared values written by thread 0.
__syncthreads();
for (int i = tid; i < resultLength; i += totalThreads) {
// Fast path: same ordering and positive element-wise strides on both arrays.
if (sameOrders && xEws > 0 && yEws > 0) {
sd::math::sd_swap(output[i * xEws], input[i * yEws]);
} else if (sameOffsets) {
// Same shape and strides: a single offset computation serves both arrays.
const auto offset = shape::getIndexOffset(i, theFirstShape);
sd::math::sd_swap(output[offset], input[offset]);
} else {
// General case: each array needs its own index-to-offset mapping.
const auto xOffset = shape::getIndexOffset(i, theFirstShape);
const auto yOffset = shape::getIndexOffset(i, theSecondShape);
sd::math::sd_swap(output[xOffset], input[yOffset]);
}
}
}
BUILD_SINGLE_TEMPLATE(template SD_KERNEL void swapUnsafeKernel,
(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape),
SD_COMMON_TYPES);
template <typename T>
void templatedSwapUnsafe(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape, cudaStream_t* theStream) {
// Host launcher: fetches the configured launch dimensions for "swap_unsafe"
// (dims.y = grid, dims.x = block, dims.z = shared-memory bytes, per the launch
// below) and runs the swap kernel on the caller-supplied stream.
dim3 launchDims = getLaunchDims("swap_unsafe");
swapUnsafeKernel<T><<<launchDims.y,launchDims.x, launchDims.z, *theStream>>>(theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape);
}
swapUnsafeKernel<T><<<launchDims.y,launchDims.x, launchDims.z, *theStream>>>(theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape);
}
BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe,
(void* theFirstBuffer, sd::LongType const* theFirstShape, void* theSecondBuffer,
sd::LongType const* theSecondShape, cudaStream_t* theStream),
SD_COMMON_TYPES);
} // namespace sd
|
a173f743738d4652d00a60574b1412ac315dc809.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/type_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace detail
{
    // Atomic add/min/max overloads for the accumulator types used by the
    // reduction kernels below.  Where the GPU has no native instruction for
    // the type, the operation is emulated with the classic atomicCAS loop on
    // the value's bit pattern.  The #else branches that just return 0 cover
    // architectures that cannot run even the emulation; they are dead code on
    // devices these kernels are actually launched on.

    // int: native atomicAdd on all architectures.
    __device__ __forceinline__ int cvAtomicAdd(int* address, int val)
    {
        return ::atomicAdd(address, val);
    }

    // unsigned int: native atomicAdd on all architectures.
    __device__ __forceinline__ unsigned int cvAtomicAdd(unsigned int* address, unsigned int val)
    {
        return ::atomicAdd(address, val);
    }

    // float: native from SM 2.0; CAS emulation below that.
    __device__ __forceinline__ float cvAtomicAdd(float* address, float val)
    {
    #if __CUDA_ARCH__ >= 200
        return ::atomicAdd(address, val);
    #else
        int* address_as_i = (int*) address;
        int old = *address_as_i, assumed;
        do {
            assumed = old;
            // Retry until no other thread touched *address in between.
            old = ::atomicCAS(address_as_i, assumed,
                __float_as_int(val + __int_as_float(assumed)));
        } while (assumed != old);
        return __int_as_float(old);
    #endif
    }

    // double: emulated via 64-bit CAS (guarded to SM 1.3+).
    __device__ __forceinline__ double cvAtomicAdd(double* address, double val)
    {
    #if __CUDA_ARCH__ >= 130
        unsigned long long int* address_as_ull = (unsigned long long int*) address;
        unsigned long long int old = *address_as_ull, assumed;
        do {
            assumed = old;
            old = ::atomicCAS(address_as_ull, assumed,
                __double_as_longlong(val + __longlong_as_double(assumed)));
        } while (assumed != old);
        return __longlong_as_double(old);
    #else
        (void) address;
        (void) val;
        return 0.0;
    #endif
    }

    // int: native atomicMin.
    __device__ __forceinline__ int cvAtomicMin(int* address, int val)
    {
        return ::atomicMin(address, val);
    }

    // float: no native float atomicMin; CAS emulation (guarded to SM 1.2+).
    __device__ __forceinline__ float cvAtomicMin(float* address, float val)
    {
    #if __CUDA_ARCH__ >= 120
        int* address_as_i = (int*) address;
        int old = *address_as_i, assumed;
        do {
            assumed = old;
            old = ::atomicCAS(address_as_i, assumed,
                __float_as_int(::fminf(val, __int_as_float(assumed))));
        } while (assumed != old);
        return __int_as_float(old);
    #else
        (void) address;
        (void) val;
        return 0.0f;
    #endif
    }

    // double: CAS emulation over the 64-bit bit pattern (SM 1.3+).
    __device__ __forceinline__ double cvAtomicMin(double* address, double val)
    {
    #if __CUDA_ARCH__ >= 130
        unsigned long long int* address_as_ull = (unsigned long long int*) address;
        unsigned long long int old = *address_as_ull, assumed;
        do {
            assumed = old;
            old = ::atomicCAS(address_as_ull, assumed,
                __double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
        } while (assumed != old);
        return __longlong_as_double(old);
    #else
        (void) address;
        (void) val;
        return 0.0;
    #endif
    }

    // int: native atomicMax.
    __device__ __forceinline__ int cvAtomicMax(int* address, int val)
    {
        return ::atomicMax(address, val);
    }

    // float: CAS emulation, mirror of cvAtomicMin above.
    __device__ __forceinline__ float cvAtomicMax(float* address, float val)
    {
    #if __CUDA_ARCH__ >= 120
        int* address_as_i = (int*) address;
        int old = *address_as_i, assumed;
        do {
            assumed = old;
            old = ::atomicCAS(address_as_i, assumed,
                __float_as_int(::fmaxf(val, __int_as_float(assumed))));
        } while (assumed != old);
        return __int_as_float(old);
    #else
        (void) address;
        (void) val;
        return 0.0f;
    #endif
    }

    // double: CAS emulation, mirror of cvAtomicMin above (SM 1.3+).
    __device__ __forceinline__ double cvAtomicMax(double* address, double val)
    {
    #if __CUDA_ARCH__ >= 130
        unsigned long long int* address_as_ull = (unsigned long long int*) address;
        unsigned long long int old = *address_as_ull, assumed;
        do {
            assumed = old;
            old = ::atomicCAS(address_as_ull, assumed,
                __double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
        } while (assumed != old);
        return __longlong_as_double(old);
    #else
        (void) address;
        (void) val;
        return 0.0;
    #endif
    }
}
namespace detail
{
    // Unroll<cn> adapts the scalar block-reduction primitives to cn-channel
    // vector values:
    //   smem_tuple - views an R[cn * BLOCK_SIZE] shared buffer as cn slices,
    //                one per channel
    //   tie        - binds the .x/.y/.z/.w channels of a vector value into a
    //                tuple of references
    //   op         - replicates the scalar reduction operator per channel
    template <int cn> struct Unroll;

    // Single channel: plain pass-through, no tuples involved.
    template <> struct Unroll<1>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ volatile R* smem_tuple(R* smem)
        {
            return smem;
        }
        template <typename R>
        static __device__ __forceinline__ R& tie(R& val)
        {
            return val;
        }
        template <class Op>
        static __device__ __forceinline__ const Op& op(const Op& op)
        {
            return op;
        }
    };

    template <> struct Unroll<2>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*> smem_tuple(R* smem)
        {
            return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE);
        }
        template <typename R>
        static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
        {
            return thrust::tie(val.x, val.y);
        }
        template <class Op>
        static __device__ __forceinline__ const thrust::tuple<Op, Op> op(const Op& op)
        {
            return thrust::make_tuple(op, op);
        }
    };

    template <> struct Unroll<3>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
        {
            return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
        }
        template <typename R>
        static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
        {
            return thrust::tie(val.x, val.y, val.z);
        }
        template <class Op>
        static __device__ __forceinline__ const thrust::tuple<Op, Op, Op> op(const Op& op)
        {
            return thrust::make_tuple(op, op, op);
        }
    };

    template <> struct Unroll<4>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
        {
            return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
        }
        template <typename R>
        static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
        {
            return thrust::tie(val.x, val.y, val.z, val.w);
        }
        template <class Op>
        static __device__ __forceinline__ const thrust::tuple<Op, Op, Op, Op> op(const Op& op)
        {
            return thrust::make_tuple(op, op, op, op);
        }
    };
}
/////////////////////////////////////////////////////////////
// sum
namespace sum
{
    // Number of blocks that have published a partial sum (used only by the
    // pre-SM2.0 two-pass path); the last block to finish resets it to 0.
    __device__ unsigned int blocks_finished = 0;

    // AtomicAdd<R, cn>: atomically adds a cn-channel value into the cn
    // consecutive scalars starting at ptr, one atomic per channel.
    template <typename R, int cn> struct AtomicAdd;
    template <typename R> struct AtomicAdd<R, 1>
    {
        static __device__ void run(R* ptr, R val)
        {
            detail::cvAtomicAdd(ptr, val);
        }
    };
    template <typename R> struct AtomicAdd<R, 2>
    {
        typedef typename TypeVec<R, 2>::vec_type val_type;

        static __device__ void run(R* ptr, val_type val)
        {
            detail::cvAtomicAdd(ptr, val.x);
            detail::cvAtomicAdd(ptr + 1, val.y);
        }
    };
    template <typename R> struct AtomicAdd<R, 3>
    {
        typedef typename TypeVec<R, 3>::vec_type val_type;

        static __device__ void run(R* ptr, val_type val)
        {
            detail::cvAtomicAdd(ptr, val.x);
            detail::cvAtomicAdd(ptr + 1, val.y);
            detail::cvAtomicAdd(ptr + 2, val.z);
        }
    };
    template <typename R> struct AtomicAdd<R, 4>
    {
        typedef typename TypeVec<R, 4>::vec_type val_type;

        static __device__ void run(R* ptr, val_type val)
        {
            detail::cvAtomicAdd(ptr, val.x);
            detail::cvAtomicAdd(ptr + 1, val.y);
            detail::cvAtomicAdd(ptr + 2, val.z);
            detail::cvAtomicAdd(ptr + 3, val.w);
        }
    };
    // Combines per-block partial sums into result[0].
    // SM 2.0+: thread 0 of every block atomically adds its block's sum into
    //          result[0] (so result[0] must start at zero).
    // Older:   every block stores its sum at result[bid]; the last block to
    //          finish (tracked through blocks_finished) re-reduces all
    //          partials and writes result[0].  __threadfence() makes the
    //          store visible to other blocks before the ticket is taken.
    template <int BLOCK_SIZE, typename R, int cn>
    struct GlobalReduce
    {
        typedef typename TypeVec<R, cn>::vec_type result_type;

        static __device__ void run(result_type& sum, result_type* result, int tid, int bid, R* smem)
        {
        #if __CUDA_ARCH__ >= 200
            if (tid == 0)
                AtomicAdd<R, cn>::run((R*) result, sum);
        #else
            __shared__ bool is_last;

            if (tid == 0)
            {
                result[bid] = sum;

                __threadfence();  // publish result[bid] before taking the ticket

                unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
                is_last = (ticket == gridDim.x * gridDim.y - 1);
            }

            __syncthreads();

            if (is_last)
            {
                // The grid is clamped to at most BLOCK_SIZE blocks (see
                // getLaunchCfg), so one thread per partial suffices.
                sum = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<result_type>::all(0);

                device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));

                if (tid == 0)
                {
                    result[0] = sum;

                    blocks_finished = 0;  // ready for the next launch
                }
            }
        #endif
        }
    };
    // Pass-1 sum kernel.  Each thread accumulates op(pixel) over its
    // twidth x theight tile, the block reduces the partials in shared
    // memory, and GlobalReduce folds the per-block sums into result[0].
    template <int BLOCK_SIZE, typename src_type, typename result_type, class Mask, class Op>
    __global__ void kernel(const PtrStepSz<src_type> src, result_type* result, const Mask mask, const Op op, const int twidth, const int theight)
    {
        typedef typename VecTraits<src_type>::elem_type T;
        typedef typename VecTraits<result_type>::elem_type R;
        const int cn = VecTraits<src_type>::cn;

        // One BLOCK_SIZE slice of shared memory per channel.
        __shared__ R smem[BLOCK_SIZE * cn];

        // Top-left element of this thread's tile.
        const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;  // linear id in block
        const int bid = blockIdx.y * gridDim.x + blockIdx.x;     // linear id of block

        result_type sum = VecTraits<result_type>::all(0);

        // Strided walk over the tile; the bounds checks absorb ragged edges.
        for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
        {
            const src_type* ptr = src.ptr(y);

            for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
            {
                if (mask(y, x))
                {
                    const src_type srcVal = ptr[x];
                    sum = sum + op(saturate_cast<result_type>(srcVal));
                }
            }
        }

        // Block-wide reduction of the per-thread partial sums (per channel).
        device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));

        GlobalReduce<BLOCK_SIZE, R, cn>::run(sum, result, tid, bid, smem);
    }
const int threads_x = 32;
const int threads_y = 8;
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
void getBufSize(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(double) * cn;
bufrows = 1;
}
    // Host driver shared by run/runAbs/runSqr: configures the launch,
    // dispatches the masked or unmasked kernel, then copies the first
    // accumulator vector back and widens it to double.
    // NOTE(review): the SM2.0+ kernel path atomically adds into buf[0], so
    // buf is presumably zero-initialised by the caller — confirm against the
    // gpu-module call sites.
    template <typename T, typename R, int cn, template <typename> class Op>
    void caller(PtrStepSzb src_, void* buf_, double* out, PtrStepSzb mask)
    {
        typedef typename TypeVec<T, cn>::vec_type src_type;
        typedef typename TypeVec<R, cn>::vec_type result_type;

        PtrStepSz<src_type> src(src_);
        result_type* buf = (result_type*) buf_;

        dim3 block, grid;
        getLaunchCfg(src.cols, src.rows, block, grid);

        // Tile each thread is responsible for.
        const int twidth = divUp(divUp(src.cols, grid.x), block.x);
        const int theight = divUp(divUp(src.rows, grid.y), block.y);

        Op<result_type> op;

        if (mask.data)
            hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, src, buf, SingleMask(mask), op, twidth, theight);
        else
            hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, src, buf, WithOutMask(), op, twidth, theight);
        cudaSafeCall( hipGetLastError() );

        cudaSafeCall( hipDeviceSynchronize() );

        // Only the first cn entries are copied back; the rest stay zero.
        R result[4] = {0, 0, 0, 0};
        cudaSafeCall( hipMemcpy(&result, buf, sizeof(result_type), hipMemcpyDeviceToHost) );

        out[0] = result[0];
        out[1] = result[1];
        out[2] = result[2];
        out[3] = result[3];
    }
    // Accumulator type per input depth: 32-bit integers for the integral
    // depths, the same type for float/double.
    template <typename T> struct SumType;
    template <> struct SumType<uchar> { typedef unsigned int R; };
    template <> struct SumType<schar> { typedef int R; };
    template <> struct SumType<ushort> { typedef unsigned int R; };
    template <> struct SumType<short> { typedef int R; };
    template <> struct SumType<int> { typedef int R; };
    template <> struct SumType<float> { typedef float R; };
    template <> struct SumType<double> { typedef double R; };

    // sum() entry point: plain (identity) accumulation.
    template <typename T, int cn>
    void run(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
    {
        typedef typename SumType<T>::R R;
        caller<T, R, cn, identity>(src, buf, out, mask);
    }
    // Explicit instantiations for every supported depth/channel combination.
    template void run<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    // absSum() entry point: accumulates |x| using the same accumulator types.
    template <typename T, int cn>
    void runAbs(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
    {
        typedef typename SumType<T>::R R;
        caller<T, R, cn, abs_func>(src, buf, out, mask);
    }
    // Explicit instantiations for every supported depth/channel combination.
    template void runAbs<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    // Element-wise square; the per-element functor behind sqrSum().
    template <typename T> struct Sqr : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return x * x;
        }
    };

    // sqrSum() entry point: accumulates x*x in double precision for every
    // input depth (the accumulator type is fixed to double here).
    template <typename T, int cn>
    void runSqr(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
    {
        caller<T, double, cn, Sqr>(src, buf, out, mask);
    }
    // Explicit instantiations for every supported depth/channel combination.
    template void runSqr<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
}
/////////////////////////////////////////////////////////////
// minMax
namespace minMax
{
    // Number of blocks that have published min/max partials (pre-SM2.0
    // two-pass path only); reset to 0 by the last block to finish.
    __device__ unsigned int blocks_finished = 0;

    // To avoid shared bank conflicts we convert each value into value of
    // appropriate type (32 bits minimum)
    template <typename T> struct MinMaxTypeTraits;
    template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<schar> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
    template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
    // Folds per-block min/max partials into minval[0] / maxval[0].
    // SM 2.0+: one atomicMin/atomicMax per block (see detail::cvAtomicMin/Max);
    //          the accumulators must have been seeded by setDefault().
    // Older:   same last-block-re-reduces protocol as sum::GlobalReduce.
    template <int BLOCK_SIZE, typename R>
    struct GlobalReduce
    {
        static __device__ void run(R& mymin, R& mymax, R* minval, R* maxval, int tid, int bid, R* sminval, R* smaxval)
        {
        #if __CUDA_ARCH__ >= 200
            if (tid == 0)
            {
                detail::cvAtomicMin(minval, mymin);
                detail::cvAtomicMax(maxval, mymax);
            }
        #else
            __shared__ bool is_last;

            if (tid == 0)
            {
                minval[bid] = mymin;
                maxval[bid] = mymax;

                __threadfence();  // publish partials before taking the ticket

                unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
                is_last = (ticket == gridDim.x * gridDim.y - 1);
            }

            __syncthreads();

            if (is_last)
            {
                // Clamp so threads beyond the partial count just re-read the
                // last valid slot — harmless for min/max.
                int idx = ::min(tid, gridDim.x * gridDim.y - 1);

                mymin = minval[idx];
                mymax = maxval[idx];

                const minimum<R> minOp;
                const maximum<R> maxOp;

                device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));

                if (tid == 0)
                {
                    minval[0] = mymin;
                    maxval[0] = mymax;

                    blocks_finished = 0;  // ready for the next launch
                }
            }
        #endif
        }
    };
    // Pass-1 min/max kernel.  Each thread scans its twidth x theight tile for
    // a local min and max, the block reduces both simultaneously in shared
    // memory, and GlobalReduce folds the per-block results together.
    template <int BLOCK_SIZE, typename T, typename R, class Mask>
    __global__ void kernel(const PtrStepSz<T> src, const Mask mask, R* minval, R* maxval, const int twidth, const int theight)
    {
        __shared__ R sminval[BLOCK_SIZE];
        __shared__ R smaxval[BLOCK_SIZE];

        // Top-left element of this thread's tile.
        const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;
        const int bid = blockIdx.y * gridDim.x + blockIdx.x;

        // Initial values: +max / -max of the work type.
        R mymin = numeric_limits<R>::max();
        R mymax = -numeric_limits<R>::max();

        const minimum<R> minOp;
        const maximum<R> maxOp;

        for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
        {
            const T* ptr = src.ptr(y);

            for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
            {
                if (mask(y, x))
                {
                    const R srcVal = ptr[x];

                    mymin = minOp(mymin, srcVal);
                    mymax = maxOp(mymax, srcVal);
                }
            }
        }

        // Simultaneous min+max block reduction through the tuple interface.
        device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));

        GlobalReduce<BLOCK_SIZE, R>::run(mymin, mymax, minval, maxval, tid, bid, sminval, smaxval);
    }
const int threads_x = 32;
const int threads_y = 8;
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(double);
bufrows = 2;
}
    // Seed the global accumulators so the per-block atomicMin/atomicMax in
    // GlobalReduce (SM2.0+ path) start from values any element can replace.
    __global__ void setDefaultKernel(int* minval_buf, int* maxval_buf)
    {
        *minval_buf = numeric_limits<int>::max();
        *maxval_buf = numeric_limits<int>::min();
    }
    __global__ void setDefaultKernel(float* minval_buf, float* maxval_buf)
    {
        *minval_buf = numeric_limits<float>::max();
        *maxval_buf = -numeric_limits<float>::max();
    }
    __global__ void setDefaultKernel(double* minval_buf, double* maxval_buf)
    {
        *minval_buf = numeric_limits<double>::max();
        *maxval_buf = -numeric_limits<double>::max();
    }

    // Host wrapper: a single-thread launch writes the seed values on-device.
    template <typename R>
    void setDefault(R* minval_buf, R* maxval_buf)
    {
        hipLaunchKernelGGL(( setDefaultKernel), dim3(1), dim3(1), 0, 0, minval_buf, maxval_buf);
    }
    // Host driver for minMax: seeds the accumulators, launches the kernel
    // (masked or unmasked), then copies the results back widened to double.
    template <typename T>
    void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
    {
        typedef typename MinMaxTypeTraits<T>::best_type R;

        dim3 block, grid;
        getLaunchCfg(src.cols, src.rows, block, grid);

        // Tile each thread is responsible for.
        const int twidth = divUp(divUp(src.cols, grid.x), block.x);
        const int theight = divUp(divUp(src.rows, grid.y), block.y);

        // buf row 0 holds the min accumulator, row 1 the max.
        R* minval_buf = (R*) buf.ptr(0);
        R* maxval_buf = (R*) buf.ptr(1);

        setDefault(minval_buf, maxval_buf);

        if (mask.data)
            hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, twidth, theight);
        else
            hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, twidth, theight);
        cudaSafeCall( hipGetLastError() );

        cudaSafeCall( hipDeviceSynchronize() );

        R minval_, maxval_;
        cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(R), hipMemcpyDeviceToHost) );
        cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(R), hipMemcpyDeviceToHost) );

        *minval = minval_;
        *maxval = maxval_;
    }
    // Explicit instantiations for every supported depth.
    template void run<uchar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<schar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<ushort>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<int   >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
}
/////////////////////////////////////////////////////////////
// minMaxLoc
namespace minMaxLoc
{
    // To avoid shared bank conflicts we convert each value into value of
    // appropriate type (32 bits minimum)
    template <typename T> struct MinMaxTypeTraits;
    template <> struct MinMaxTypeTraits<unsigned char> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<signed char> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<unsigned short> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
    template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
    // Pass 1: each thread tracks the extreme values AND their flattened
    // locations (y * cols + x) over its tile; a keyed block reduction keeps
    // each value paired with its location; thread 0 stores the block's
    // result at index bid for pass 2 to combine.
    template <int BLOCK_SIZE, typename T, class Mask>
    __global__ void kernel_pass_1(const PtrStepSz<T> src, const Mask mask, T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, const int twidth, const int theight)
    {
        typedef typename MinMaxTypeTraits<T>::best_type work_type;

        __shared__ work_type sminval[BLOCK_SIZE];
        __shared__ work_type smaxval[BLOCK_SIZE];
        __shared__ unsigned int sminloc[BLOCK_SIZE];
        __shared__ unsigned int smaxloc[BLOCK_SIZE];

        // Top-left element of this thread's tile.
        const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;
        const int bid = blockIdx.y * gridDim.x + blockIdx.x;

        work_type mymin = numeric_limits<work_type>::max();
        work_type mymax = -numeric_limits<work_type>::max();
        unsigned int myminloc = 0;
        unsigned int mymaxloc = 0;

        for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
        {
            const T* ptr = src.ptr(y);

            for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
            {
                if (mask(y, x))
                {
                    const work_type srcVal = ptr[x];

                    if (srcVal < mymin)
                    {
                        mymin = srcVal;
                        myminloc = y * src.cols + x;  // flattened location
                    }

                    if (srcVal > mymax)
                    {
                        mymax = srcVal;
                        mymaxloc = y * src.cols + x;
                    }
                }
            }
        }

        // Keyed reduction: each location travels with its value.
        reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
                                 smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
                                 tid,
                                 thrust::make_tuple(less<work_type>(), greater<work_type>()));

        if (tid == 0)
        {
            minval[bid] = (T) mymin;
            maxval[bid] = (T) mymax;
            minloc[bid] = myminloc;
            maxloc[bid] = mymaxloc;
        }
    }
    // Pass 2: a single block re-reduces the count per-block partials from
    // pass 1.  Threads beyond count are clamped onto the last valid entry,
    // which is harmless for a min/max reduction.
    template <int BLOCK_SIZE, typename T>
    __global__ void kernel_pass_2(T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, int count)
    {
        typedef typename MinMaxTypeTraits<T>::best_type work_type;

        __shared__ work_type sminval[BLOCK_SIZE];
        __shared__ work_type smaxval[BLOCK_SIZE];
        __shared__ unsigned int sminloc[BLOCK_SIZE];
        __shared__ unsigned int smaxloc[BLOCK_SIZE];

        unsigned int idx = ::min(threadIdx.x, count - 1);

        work_type mymin = minval[idx];
        work_type mymax = maxval[idx];
        unsigned int myminloc = minloc[idx];
        unsigned int mymaxloc = maxloc[idx];

        reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
                                 smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
                                 threadIdx.x,
                                 thrust::make_tuple(less<work_type>(), greater<work_type>()));

        // Final results land in slot 0 of each buffer.
        if (threadIdx.x == 0)
        {
            minval[0] = (T) mymin;
            maxval[0] = (T) mymax;
            minloc[0] = myminloc;
            maxloc[0] = mymaxloc;
        }
    }
const int threads_x = 32;
const int threads_y = 8;
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
void getBufSize(int cols, int rows, size_t elem_size, int& b1cols, int& b1rows, int& b2cols, int& b2rows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
// For values
b1cols = grid.x * grid.y * elem_size;
b1rows = 2;
// For locations
b2cols = grid.x * grid.y * sizeof(int);
b2rows = 2;
}
    // Host driver for minMaxLoc: two-pass reduction.  valbuf rows 0/1 hold
    // the per-block min/max values, locbuf rows 0/1 the matching flattened
    // locations.  Outputs minloc/maxloc as {x, y} pairs.
    template <typename T>
    void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf)
    {
        dim3 block, grid;
        getLaunchCfg(src.cols, src.rows, block, grid);

        // Tile each thread is responsible for.
        const int twidth = divUp(divUp(src.cols, grid.x), block.x);
        const int theight = divUp(divUp(src.rows, grid.y), block.y);

        T* minval_buf = (T*) valbuf.ptr(0);
        T* maxval_buf = (T*) valbuf.ptr(1);
        unsigned int* minloc_buf = locbuf.ptr(0);
        unsigned int* maxloc_buf = locbuf.ptr(1);

        if (mask.data)
            hipLaunchKernelGGL(( kernel_pass_1<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
        else
            hipLaunchKernelGGL(( kernel_pass_1<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
        cudaSafeCall( hipGetLastError() );

        // Second pass: a single block combines the grid.x * grid.y partials.
        hipLaunchKernelGGL(( kernel_pass_2<threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0, minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
        cudaSafeCall( hipGetLastError() );

        cudaSafeCall( hipDeviceSynchronize() );

        T minval_, maxval_;
        cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
        cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
        *minval = minval_;
        *maxval = maxval_;

        unsigned int minloc_, maxloc_;
        cudaSafeCall( hipMemcpy(&minloc_, minloc_buf, sizeof(unsigned int), hipMemcpyDeviceToHost) );
        cudaSafeCall( hipMemcpy(&maxloc_, maxloc_buf, sizeof(unsigned int), hipMemcpyDeviceToHost) );

        // Decode the flattened index: [1] = row (y), [0] = column (x).
        minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
        maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
    }
    // Explicit instantiations for every supported depth.
    template void run<unsigned char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<signed char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<unsigned short>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<int >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
}
/////////////////////////////////////////////////////////////
// countNonZero
namespace countNonZero
{
// Ticket counter used by the pre-sm_20 fallback below to detect which block
// finishes last; reset to 0 by that block before the kernel exits.
__device__ unsigned int blocks_finished = 0;
// Counts the non-zero elements of src into count[0].
// Layout: each thread walks a twidth x theight tile (strided by blockDim),
// accumulates a private count, and the block reduces its BLOCK_SIZE partials
// through shared memory. On sm_20+ thread 0 of every block atomically adds the
// block total into count[0]; on older devices each block publishes its total
// in count[bid] and the last block to finish re-reduces the partials.
// Precondition: count[0] zeroed by the host before launch (see run below).
template <int BLOCK_SIZE, typename T>
__global__ void kernel(const PtrStepSz<T> src, unsigned int* count, const int twidth, const int theight)
{
__shared__ unsigned int scount[BLOCK_SIZE];
// Top-left element of this thread's tile.
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
// Flat thread index within the block.
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int mycount = 0;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
const T srcVal = ptr[x];
mycount += (srcVal != 0);
}
}
// Block-wide reduction; the block total ends up in thread 0's mycount.
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
#if __CUDA_ARCH__ >= 200
if (tid == 0)
::atomicAdd(count, mycount);
#else
// Fallback for pre-sm_20 devices: publish per-block partials and let the
// last finishing block combine them.
__shared__ bool is_last;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
if (tid == 0)
{
count[bid] = mycount;
// Make the partial globally visible before taking a ticket.
__threadfence();
unsigned int ticket = ::atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
// The grid never exceeds 32x8 = BLOCK_SIZE blocks (see getLaunchCfg),
// so one block can reduce all per-block partials.
mycount = tid < gridDim.x * gridDim.y ? count[tid] : 0;
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
if (tid == 0)
{
count[0] = mycount;
// Reset the ticket counter for the next launch.
blocks_finished = 0;
}
}
#endif
}
const int threads_x = 32;
const int threads_y = 8;
// Launch configuration for countNonZero: fixed 32x8 thread blocks, grid sized
// so each thread covers a tile, and clamped to at most 32x8 blocks so the
// final single-block reduction in the kernel can see every partial.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
    block = dim3(threads_x, threads_y);
    const unsigned int gx = divUp(cols, block.x * block.y);
    const unsigned int gy = divUp(rows, block.y * block.x);
    grid = dim3(gx < block.x ? gx : block.x,
                gy < block.y ? gy : block.y);
}
// Reports the intermediate-buffer size needed by run(): one unsigned int
// partial count per launched block, laid out as a single row.
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
    dim3 block, grid;
    getLaunchCfg(cols, rows, block, grid);
    bufrows = 1;
    bufcols = static_cast<int>(grid.x * grid.y * sizeof(int));
}
// Host-side driver for countNonZero: zeroes the accumulator, launches the
// counting kernel over a clamped grid, synchronizes, and returns the total.
template <typename T>
int run(const PtrStepSzb src, PtrStep<unsigned int> buf)
{
    dim3 block, grid;
    getLaunchCfg(src.cols, src.rows, block, grid);
    // Per-thread tile size so the fixed grid still covers the whole image.
    const int twidth  = divUp(divUp(src.cols, grid.x), block.x);
    const int theight = divUp(divUp(src.rows, grid.y), block.y);
    // The kernel accumulates atomically into count_buf[0]; it must start at zero.
    unsigned int* count_buf = buf.ptr(0);
    cudaSafeCall( hipMemset(count_buf, 0, sizeof(unsigned int)) );
    hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, count_buf, twidth, theight);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
    unsigned int result;
    cudaSafeCall( hipMemcpy(&result, count_buf, sizeof(unsigned int), hipMemcpyDeviceToHost) );
    return static_cast<int>(result);
}
// Explicit instantiations for every source depth supported by countNonZero.
template int run<uchar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<schar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<ushort>(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<short >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<int >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<float >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<double>(const PtrStepSzb src, PtrStep<unsigned int> buf);
}
//////////////////////////////////////////////////////////////////////////////
// reduce
namespace reduce
{
// Reduction operators shared by the row/column reductions below. Each exposes:
//   startValue<T>() - identity element for the reduction,
//   operator()(a,b) - binary combine step,
//   result(r, sz)   - final transform (only Avg uses sz, the element count).
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Sum() {}
__device__ __forceinline__ Sum(const Sum&) {}
};
// Average: accumulates like Sum, then divides by the element count in result().
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__device__ __forceinline__ Avg() {}
__device__ __forceinline__ Avg(const Avg&) {}
};
// Minimum: identity is the per-element type's maximum value.
struct Min
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Min() {}
__device__ __forceinline__ Min(const Min&) {}
};
// Maximum: identity is the negated per-element maximum (lowest finite value).
struct Max
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Max() {}
__device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
// Reduces src along rows, producing one value per column in dst.
// Launch: 16x16 blocks, one block per 16-column stripe (see rowsCaller).
// Each column of threads accumulates down the rows with stride 16; the 16
// partials per column are then reduced through shared memory and thread
// row 0 writes the per-column result.
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
// Each of the 16 thread rows handles every 16th source row of column x.
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
// Store transposed so a column's 16 partials occupy one contiguous smem row.
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
device::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
// op.result applies the final transform (e.g. Avg divides by src.rows).
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
// Launches rowsKernel for one (source, accumulator, destination, op) combo.
// dst must hold src.cols elements of type D.
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, hipStream_t stream)
{
    // One 16x16 thread block per 16-column stripe of src.
    const dim3 threads(16, 16);
    const dim3 blocks(divUp(src.cols, threads.x));
    Op reduceOp;
    hipLaunchKernelGGL(( rowsKernel<T, S, D, Op>), dim3(blocks), dim3(threads), 0, stream, src, dst, reduceOp);
    cudaSafeCall( hipGetLastError() );
    // Synchronous semantics when called on the default stream.
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Type-erased entry point for row reduction.
// op selects the operation: 0 = sum, 1 = avg, 2 = max, 3 = min.
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, hipStream_t stream)
{
    typedef void (*caller_t)(PtrStepSz<T> src, D* dst, hipStream_t stream);
    static const caller_t callers[4] =
    {
        rowsCaller<T, S, D, Sum>,
        rowsCaller<T, S, D, Avg>,
        rowsCaller<T, S, D, Max>,
        rowsCaller<T, S, D, Min>
    };
    callers[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
// Explicit instantiations: <source depth, accumulator, destination depth>.
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
///////////////////////////////////////////////////////////
// Reduces src along columns, producing one cn-channel value per row in dst.
// Launch: one BLOCK_SIZE-thread block per source row (see colsCaller).
// Threads stride across the row's pixels; the per-thread partials are then
// reduced channel-wise through shared memory.
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
// One BLOCK_SIZE-sized shared-memory slice per channel.
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
// Unroll<cn> splits the vector value into per-channel scalars for the reduce.
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
// op.result applies the final transform (e.g. Avg divides by src.cols).
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
// Launches colsKernel for one (depth, accumulator, destination, channels, op)
// combination. dst must hold src.rows cn-channel elements.
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, hipStream_t stream)
{
    // One 256-thread block per source row.
    const int BLOCK_SIZE = 256;
    const dim3 threads(BLOCK_SIZE);
    const dim3 blocks(src.rows);
    Op reduceOp;
    hipLaunchKernelGGL(( colsKernel<BLOCK_SIZE, T, S, D, cn, Op>), dim3(blocks), dim3(threads), 0, stream, (PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, reduceOp);
    cudaSafeCall( hipGetLastError() );
    // Synchronous semantics when called on the default stream.
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Type-erased entry point for column reduction.
// cn is the channel count (1..4); op selects 0 = sum, 1 = avg, 2 = max, 3 = min.
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream)
{
    typedef void (*caller_t)(PtrStepSzb src, void* dst, hipStream_t stream);
    // Dispatch table indexed by [cn - 1][op].
    static const caller_t callers[4][4] =
    {
        {colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
        {colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
        {colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
        {colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>}
    };
    callers[cn - 1][op](src, dst, stream);
}
// Explicit instantiations: <source depth, accumulator, destination depth>.
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| a173f743738d4652d00a60574b1412ac315dc809.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/type_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace detail
{
// Atomic helpers that paper over compute-capability differences: where the
// hardware instruction is missing, the operation is emulated by an atomicCAS
// loop on the value's bit pattern; where even CAS is unavailable the helper
// does nothing and returns 0.
__device__ __forceinline__ int cvAtomicAdd(int* address, int val)
{
return ::atomicAdd(address, val);
}
__device__ __forceinline__ unsigned int cvAtomicAdd(unsigned int* address, unsigned int val)
{
return ::atomicAdd(address, val);
}
// float add: native on sm_20+, CAS emulation below.
__device__ __forceinline__ float cvAtomicAdd(float* address, float val)
{
#if __CUDA_ARCH__ >= 200
return ::atomicAdd(address, val);
#else
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(val + __int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
#endif
}
// double add: CAS emulation on sm_13+; no-op (returns 0.0) below that.
__device__ __forceinline__ double cvAtomicAdd(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
#else
(void) address;
(void) val;
return 0.0;
#endif
}
__device__ __forceinline__ int cvAtomicMin(int* address, int val)
{
return ::atomicMin(address, val);
}
// float min: CAS emulation on sm_12+; no-op (returns 0.0f) below that.
__device__ __forceinline__ float cvAtomicMin(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
(void) address;
(void) val;
return 0.0f;
#endif
}
// double min: CAS emulation on sm_13+; no-op (returns 0.0) below that.
__device__ __forceinline__ double cvAtomicMin(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
(void) address;
(void) val;
return 0.0;
#endif
}
__device__ __forceinline__ int cvAtomicMax(int* address, int val)
{
return ::atomicMax(address, val);
}
// float max: CAS emulation on sm_12+; no-op (returns 0.0f) below that.
__device__ __forceinline__ float cvAtomicMax(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
(void) address;
(void) val;
return 0.0f;
#endif
}
// double max: CAS emulation on sm_13+; no-op (returns 0.0) below that.
__device__ __forceinline__ double cvAtomicMax(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
(void) address;
(void) val;
return 0.0;
#endif
}
}
namespace detail
{
// Unroll<cn> adapts a cn-channel vector value to the scalar-tuple interface of
// device::reduce: smem_tuple() hands out one BLOCK_SIZE-sized shared-memory
// slice per channel, tie() exposes the vector's components as a tuple of
// references, and op() replicates the scalar operator once per channel.
template <int cn> struct Unroll;
// Single channel: plain scalar, no tuple machinery needed.
template <> struct Unroll<1>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ volatile R* smem_tuple(R* smem)
{
return smem;
}
template <typename R>
static __device__ __forceinline__ R& tie(R& val)
{
return val;
}
template <class Op>
static __device__ __forceinline__ const Op& op(const Op& op)
{
return op;
}
};
template <> struct Unroll<2>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op);
}
};
template <> struct Unroll<3>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y, val.z);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op, op);
}
};
template <> struct Unroll<4>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y, val.z, val.w);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op, Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op, op, op);
}
};
}
/////////////////////////////////////////////////////////////
// sum
namespace sum
{
// Ticket counter for the pre-sm_20 final-reduction fallback in GlobalReduce;
// reset to 0 by the last finishing block.
__device__ unsigned int blocks_finished = 0;
// AtomicAdd<R, cn>: adds a cn-channel vector value into cn consecutive
// scalars at ptr, one atomic per channel (channels are independent).
template <typename R, int cn> struct AtomicAdd;
template <typename R> struct AtomicAdd<R, 1>
{
static __device__ void run(R* ptr, R val)
{
detail::cvAtomicAdd(ptr, val);
}
};
template <typename R> struct AtomicAdd<R, 2>
{
typedef typename TypeVec<R, 2>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
}
};
template <typename R> struct AtomicAdd<R, 3>
{
typedef typename TypeVec<R, 3>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
detail::cvAtomicAdd(ptr + 2, val.z);
}
};
template <typename R> struct AtomicAdd<R, 4>
{
typedef typename TypeVec<R, 4>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
detail::cvAtomicAdd(ptr + 2, val.z);
detail::cvAtomicAdd(ptr + 3, val.w);
}
};
// Combines per-block partial sums into result[0].
// sm_20+: thread 0 of each block atomically adds the block's sum into the
// final slot (result must be zeroed beforehand).
// Older devices: each block publishes its sum in result[bid]; the last block
// to finish (tracked via the blocks_finished ticket) re-reduces all partials.
// Relies on the grid containing at most BLOCK_SIZE blocks (see getLaunchCfg).
template <int BLOCK_SIZE, typename R, int cn>
struct GlobalReduce
{
typedef typename TypeVec<R, cn>::vec_type result_type;
static __device__ void run(result_type& sum, result_type* result, int tid, int bid, R* smem)
{
#if __CUDA_ARCH__ >= 200
if (tid == 0)
AtomicAdd<R, cn>::run((R*) result, sum);
#else
__shared__ bool is_last;
if (tid == 0)
{
result[bid] = sum;
// Publish the partial to other blocks before taking a ticket.
__threadfence();
unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
sum = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<result_type>::all(0);
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));
if (tid == 0)
{
result[0] = sum;
// Reset the ticket counter for the next launch.
blocks_finished = 0;
}
}
#endif
}
};
// Per-block sum kernel: each thread walks a twidth x theight tile (strided by
// blockDim), transforms each unmasked pixel with op (identity / abs / square)
// and accumulates; the block then reduces its partials channel-wise in shared
// memory and hands the block total to GlobalReduce.
template <int BLOCK_SIZE, typename src_type, typename result_type, class Mask, class Op>
__global__ void kernel(const PtrStepSz<src_type> src, result_type* result, const Mask mask, const Op op, const int twidth, const int theight)
{
typedef typename VecTraits<src_type>::elem_type T;
typedef typename VecTraits<result_type>::elem_type R;
const int cn = VecTraits<src_type>::cn;
// One BLOCK_SIZE-sized shared-memory slice per channel.
__shared__ R smem[BLOCK_SIZE * cn];
// Top-left element of this thread's tile.
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
// Flat thread index and flat block index.
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
result_type sum = VecTraits<result_type>::all(0);
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const src_type* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const src_type srcVal = ptr[x];
sum = sum + op(saturate_cast<result_type>(srcVal));
}
}
}
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));
GlobalReduce<BLOCK_SIZE, R, cn>::run(sum, result, tid, bid, smem);
}
const int threads_x = 32;
const int threads_y = 8;
// Launch configuration for sum: fixed 32x8 thread blocks, grid sized so each
// thread covers a tile, and clamped to at most 32x8 blocks so the pre-sm_20
// single-block final reduction can see every partial.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
    block = dim3(threads_x, threads_y);
    const unsigned int gx = divUp(cols, block.x * block.y);
    const unsigned int gy = divUp(rows, block.y * block.x);
    grid = dim3(gx < block.x ? gx : block.x,
                gy < block.y ? gy : block.y);
}
// Reports the intermediate-buffer size needed by caller(): one double-sized
// partial per block per channel, laid out as a single row.
void getBufSize(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
    dim3 block, grid;
    getLaunchCfg(cols, rows, block, grid);
    bufrows = 1;
    bufcols = static_cast<int>(grid.x * grid.y * sizeof(double) * cn);
}
// Host-side driver: launches the sum kernel (with or without a mask),
// synchronizes, and copies the cn-channel result from buf_[0] back into
// out[0..3] (channels beyond cn stay 0).
// NOTE(review): on the sm_20+ atomic-add path buf_[0] is assumed to be zeroed
// before this call -- confirm against the gpu::sum wrapper.
template <typename T, typename R, int cn, template <typename> class Op>
void caller(PtrStepSzb src_, void* buf_, double* out, PtrStepSzb mask)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<R, cn>::vec_type result_type;
PtrStepSz<src_type> src(src_);
result_type* buf = (result_type*) buf_;
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
// Per-thread tile size so the clamped grid still covers the whole image.
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
Op<result_type> op;
if (mask.data)
kernel<threads_x * threads_y><<<grid, block>>>(src, buf, SingleMask(mask), op, twidth, theight);
else
kernel<threads_x * threads_y><<<grid, block>>>(src, buf, WithOutMask(), op, twidth, theight);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
// Only the first cn entries are overwritten by the copy; the rest remain 0.
R result[4] = {0, 0, 0, 0};
cudaSafeCall( cudaMemcpy(&result, buf, sizeof(result_type), cudaMemcpyDeviceToHost) );
out[0] = result[0];
out[1] = result[1];
out[2] = result[2];
out[3] = result[3];
}
// Maps each source depth to the accumulator type used for plain/abs sums.
template <typename T> struct SumType;
template <> struct SumType<uchar> { typedef unsigned int R; };
template <> struct SumType<schar> { typedef int R; };
template <> struct SumType<ushort> { typedef unsigned int R; };
template <> struct SumType<short> { typedef int R; };
template <> struct SumType<int> { typedef int R; };
template <> struct SumType<float> { typedef float R; };
template <> struct SumType<double> { typedef double R; };
// sum(src): reduce with the identity transform.
template <typename T, int cn>
void run(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
typedef typename SumType<T>::R R;
caller<T, R, cn, identity>(src, buf, out, mask);
}
// Explicit instantiations for every supported depth / channel count.
template void run<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
// absSum(src): reduce |x| using the same accumulator types as plain sum.
template <typename T, int cn>
void runAbs(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
typedef typename SumType<T>::R R;
caller<T, R, cn, abs_func>(src, buf, out, mask);
}
// Explicit instantiations for every supported depth / channel count.
template void runAbs<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
// Element-wise square, used as the per-pixel transform for sqrSum.
template <typename T> struct Sqr : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
return x * x;
}
};
// sqrSum(src): accumulate x*x, always in double precision.
template <typename T, int cn>
void runSqr(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
caller<T, double, cn, Sqr>(src, buf, out, mask);
}
// Explicit instantiations for every supported depth / channel count.
template void runSqr<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
}
/////////////////////////////////////////////////////////////
// minMax
namespace minMax
{
__device__ unsigned int blocks_finished = 0;
// To avoid shared bank conflicts we convert each value into value of
// appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits;
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<schar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
template <int BLOCK_SIZE, typename R>
struct GlobalReduce
{
static __device__ void run(R& mymin, R& mymax, R* minval, R* maxval, int tid, int bid, R* sminval, R* smaxval)
{
#if __CUDA_ARCH__ >= 200
if (tid == 0)
{
detail::cvAtomicMin(minval, mymin);
detail::cvAtomicMax(maxval, mymax);
}
#else
__shared__ bool is_last;
if (tid == 0)
{
minval[bid] = mymin;
maxval[bid] = mymax;
__threadfence();
unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
int idx = ::min(tid, gridDim.x * gridDim.y - 1);
mymin = minval[idx];
mymax = maxval[idx];
const minimum<R> minOp;
const maximum<R> maxOp;
device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));
if (tid == 0)
{
minval[0] = mymin;
maxval[0] = mymax;
blocks_finished = 0;
}
}
#endif
}
};
template <int BLOCK_SIZE, typename T, typename R, class Mask>
__global__ void kernel(const PtrStepSz<T> src, const Mask mask, R* minval, R* maxval, const int twidth, const int theight)
{
__shared__ R sminval[BLOCK_SIZE];
__shared__ R smaxval[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
R mymin = numeric_limits<R>::max();
R mymax = -numeric_limits<R>::max();
const minimum<R> minOp;
const maximum<R> maxOp;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const R srcVal = ptr[x];
mymin = minOp(mymin, srcVal);
mymax = maxOp(mymax, srcVal);
}
}
}
device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));
GlobalReduce<BLOCK_SIZE, R>::run(mymin, mymax, minval, maxval, tid, bid, sminval, smaxval);
}
const int threads_x = 32;
const int threads_y = 8;
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(double);
bufrows = 2;
}
__global__ void setDefaultKernel(int* minval_buf, int* maxval_buf)
{
*minval_buf = numeric_limits<int>::max();
*maxval_buf = numeric_limits<int>::min();
}
__global__ void setDefaultKernel(float* minval_buf, float* maxval_buf)
{
*minval_buf = numeric_limits<float>::max();
*maxval_buf = -numeric_limits<float>::max();
}
__global__ void setDefaultKernel(double* minval_buf, double* maxval_buf)
{
*minval_buf = numeric_limits<double>::max();
*maxval_buf = -numeric_limits<double>::max();
}
template <typename R>
void setDefault(R* minval_buf, R* maxval_buf)
{
setDefaultKernel<<<1, 1>>>(minval_buf, maxval_buf);
}
template <typename T>
void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
{
typedef typename MinMaxTypeTraits<T>::best_type R;
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
R* minval_buf = (R*) buf.ptr(0);
R* maxval_buf = (R*) buf.ptr(1);
setDefault(minval_buf, maxval_buf);
if (mask.data)
kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, twidth, theight);
else
kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, twidth, theight);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(R), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(R), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void run<uchar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<schar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<ushort>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<int >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
}
/////////////////////////////////////////////////////////////
// minMaxLoc
namespace minMaxLoc
{
// To avoid shared bank conflicts we convert each value into value of
// appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits;
template <> struct MinMaxTypeTraits<unsigned char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<signed char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<unsigned short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
template <int BLOCK_SIZE, typename T, class Mask>
__global__ void kernel_pass_1(const PtrStepSz<T> src, const Mask mask, T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, const int twidth, const int theight)
{
typedef typename MinMaxTypeTraits<T>::best_type work_type;
__shared__ work_type sminval[BLOCK_SIZE];
__shared__ work_type smaxval[BLOCK_SIZE];
__shared__ unsigned int sminloc[BLOCK_SIZE];
__shared__ unsigned int smaxloc[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
work_type mymin = numeric_limits<work_type>::max();
work_type mymax = -numeric_limits<work_type>::max();
unsigned int myminloc = 0;
unsigned int mymaxloc = 0;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const work_type srcVal = ptr[x];
if (srcVal < mymin)
{
mymin = srcVal;
myminloc = y * src.cols + x;
}
if (srcVal > mymax)
{
mymax = srcVal;
mymaxloc = y * src.cols + x;
}
}
}
}
reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
tid,
thrust::make_tuple(less<work_type>(), greater<work_type>()));
if (tid == 0)
{
minval[bid] = (T) mymin;
maxval[bid] = (T) mymax;
minloc[bid] = myminloc;
maxloc[bid] = mymaxloc;
}
}
template <int BLOCK_SIZE, typename T>
__global__ void kernel_pass_2(T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, int count)
{
typedef typename MinMaxTypeTraits<T>::best_type work_type;
__shared__ work_type sminval[BLOCK_SIZE];
__shared__ work_type smaxval[BLOCK_SIZE];
__shared__ unsigned int sminloc[BLOCK_SIZE];
__shared__ unsigned int smaxloc[BLOCK_SIZE];
unsigned int idx = ::min(threadIdx.x, count - 1);
work_type mymin = minval[idx];
work_type mymax = maxval[idx];
unsigned int myminloc = minloc[idx];
unsigned int mymaxloc = maxloc[idx];
reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
threadIdx.x,
thrust::make_tuple(less<work_type>(), greater<work_type>()));
if (threadIdx.x == 0)
{
minval[0] = (T) mymin;
maxval[0] = (T) mymax;
minloc[0] = myminloc;
maxloc[0] = mymaxloc;
}
}
const int threads_x = 32;
const int threads_y = 8;
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
void getBufSize(int cols, int rows, size_t elem_size, int& b1cols, int& b1rows, int& b2cols, int& b2rows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
// For values
b1cols = grid.x * grid.y * elem_size;
b1rows = 2;
// For locations
b2cols = grid.x * grid.y * sizeof(int);
b2rows = 2;
}
template <typename T>
void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf)
{
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
T* minval_buf = (T*) valbuf.ptr(0);
T* maxval_buf = (T*) valbuf.ptr(1);
unsigned int* minloc_buf = locbuf.ptr(0);
unsigned int* maxloc_buf = locbuf.ptr(1);
if (mask.data)
kernel_pass_1<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
else
kernel_pass_1<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
cudaSafeCall( cudaGetLastError() );
kernel_pass_2<threads_x * threads_y><<<1, threads_x * threads_y>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
unsigned int minloc_, maxloc_;
cudaSafeCall( cudaMemcpy(&minloc_, minloc_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxloc_, maxloc_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void run<unsigned char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<signed char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<unsigned short>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<int >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
}
/////////////////////////////////////////////////////////////
// countNonZero
namespace countNonZero
{
__device__ unsigned int blocks_finished = 0;
template <int BLOCK_SIZE, typename T>
__global__ void kernel(const PtrStepSz<T> src, unsigned int* count, const int twidth, const int theight)
{
__shared__ unsigned int scount[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int mycount = 0;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
const T srcVal = ptr[x];
mycount += (srcVal != 0);
}
}
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
#if __CUDA_ARCH__ >= 200
if (tid == 0)
::atomicAdd(count, mycount);
#else
__shared__ bool is_last;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
if (tid == 0)
{
count[bid] = mycount;
__threadfence();
unsigned int ticket = ::atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
mycount = tid < gridDim.x * gridDim.y ? count[tid] : 0;
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
if (tid == 0)
{
count[0] = mycount;
blocks_finished = 0;
}
}
#endif
}
const int threads_x = 32;
const int threads_y = 8;
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(int);
bufrows = 1;
}
template <typename T>
int run(const PtrStepSzb src, PtrStep<unsigned int> buf)
{
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
unsigned int* count_buf = buf.ptr(0);
cudaSafeCall( cudaMemset(count_buf, 0, sizeof(unsigned int)) );
kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, count_buf, twidth, theight);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
unsigned int count;
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost));
return count;
}
template int run<uchar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<schar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<ushort>(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<short >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<int >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<float >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<double>(const PtrStepSzb src, PtrStep<unsigned int> buf);
}
//////////////////////////////////////////////////////////////////////////////
// reduce
namespace reduce
{
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Sum() {}
__device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__device__ __forceinline__ Avg() {}
__device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Min() {}
__device__ __forceinline__ Min(const Min&) {}
};
struct Max
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Max() {}
__device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
device::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, cudaStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op op;
rowsKernel<T, S, D, Op><<<grid, block, 0, stream>>>(src, dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSz<T> src, D* dst, cudaStream_t stream);
static const func_t funcs[] =
{
rowsCaller<T, S, D, Sum>,
rowsCaller<T, S, D, Avg>,
rowsCaller<T, S, D, Max>,
rowsCaller<T, S, D, Min>
};
funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
///////////////////////////////////////////////////////////
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, cudaStream_t stream)
{
const int BLOCK_SIZE = 256;
const dim3 block(BLOCK_SIZE);
const dim3 grid(src.rows);
Op op;
colsKernel<BLOCK_SIZE, T, S, D, cn, Op><<<grid, block, 0, stream>>>((PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, void* dst, cudaStream_t stream);
static const func_t funcs[5][4] =
{
{0,0,0,0},
{colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
{colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
{colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
{colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
};
funcs[cn][op](src, dst, stream);
}
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
7e725cd7e90c40cfbcfcaefde0b05697b474f3e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Kukri.cuh"
using namespace kukri;
void kukri::Timer::tic() {
gpuErrChk(hipEventCreate(&m_start));
gpuErrChk(hipEventCreate(&m_stop));
gpuErrChk(hipEventRecord(m_start));
}
float kukri::Timer::toc() {
gpuErrChk(hipEventRecord(m_stop));
gpuErrChk(hipEventSynchronize(m_stop));
gpuErrChk(hipEventElapsedTime(&t, m_start, m_stop));
gpuErrChk(hipEventDestroy(m_start));
gpuErrChk(hipEventDestroy(m_stop));
return t;
}
void kukri::array_half2float_host(float *h_dst, half *h_src, size_t size) {
// Convert an array of half to an array of float, both of which are in host
float *d_dst;
half *d_src;
gpuErrChk(hipMalloc(&d_dst, size * sizeof(float)));
gpuErrChk(hipMalloc(&d_src, size * sizeof(half)));
gpuErrChk(hipMemcpy(d_src, h_src, size * sizeof(half), hipMemcpyHostToDevice));
kukri::array_half2float_device(d_dst, d_src, size);
gpuErrChk(hipMemcpy(h_dst, d_dst, size * sizeof(float), hipMemcpyDeviceToHost));
hipFree(d_dst);
hipFree(d_src);
}
void kukri::array_half2float_device(float *d_dst, half *d_src, size_t size) {
int block_size = 512;
int grid_size = 256;
hipLaunchKernelGGL(( kukri::_array_half2float_kernel) , dim3(grid_size), dim3(block_size), 0, 0, d_dst, d_src, size);
gpuErrChk(hipGetLastError());
}
__global__ void kukri::_array_half2float_kernel(float *d_dst, half *d_src, size_t size) {
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
size_t step_size = blockDim.x * gridDim.x;
for (; index < size; index += step_size) {
d_dst[index] = __half2float(d_src[index]);
}
}
void kukri::array_float2half_host(half *h_dst, float *h_src, size_t size) {
// Convert an array of float to an array of half, both of which are in host
half *d_dst;
float *d_src;
gpuErrChk(hipMalloc(&d_dst, size * sizeof(half)));
gpuErrChk(hipMalloc(&d_src, size * sizeof(float)));
gpuErrChk(hipMemcpy(d_src, h_src, size * sizeof(float), hipMemcpyHostToDevice));
kukri::array_float2half_device(d_dst, d_src, size);
hipMemcpy(h_dst, d_dst, size * sizeof(half), hipMemcpyDeviceToHost);
gpuErrChk(hipFree(d_dst));
gpuErrChk(hipFree(d_src));
}
void kukri::array_float2half_device(half *d_dst, float *d_src, size_t size) {
int block_size = 512;
int grid_size = 256;
hipLaunchKernelGGL(( kukri::_array_float2half_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_dst, d_src, size);
gpuErrChk(hipGetLastError());
}
__global__ void kukri::_array_float2half_kernel(half *d_dst, float *d_src, size_t size) {
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
size_t step_size = blockDim.x * gridDim.x;
for (; index < size; index += step_size) {
d_dst[index] = __float2half_rn(d_src[index]);
}
}
| 7e725cd7e90c40cfbcfcaefde0b05697b474f3e7.cu | #include "Kukri.cuh"
using namespace kukri;
void kukri::Timer::tic() {
gpuErrChk(cudaEventCreate(&m_start));
gpuErrChk(cudaEventCreate(&m_stop));
gpuErrChk(cudaEventRecord(m_start));
}
float kukri::Timer::toc() {
gpuErrChk(cudaEventRecord(m_stop));
gpuErrChk(cudaEventSynchronize(m_stop));
gpuErrChk(cudaEventElapsedTime(&t, m_start, m_stop));
gpuErrChk(cudaEventDestroy(m_start));
gpuErrChk(cudaEventDestroy(m_stop));
return t;
}
void kukri::array_half2float_host(float *h_dst, half *h_src, size_t size) {
// Convert an array of half to an array of float, both of which are in host
float *d_dst;
half *d_src;
gpuErrChk(cudaMalloc(&d_dst, size * sizeof(float)));
gpuErrChk(cudaMalloc(&d_src, size * sizeof(half)));
gpuErrChk(cudaMemcpy(d_src, h_src, size * sizeof(half), cudaMemcpyHostToDevice));
kukri::array_half2float_device(d_dst, d_src, size);
gpuErrChk(cudaMemcpy(h_dst, d_dst, size * sizeof(float), cudaMemcpyDeviceToHost));
cudaFree(d_dst);
cudaFree(d_src);
}
void kukri::array_half2float_device(float *d_dst, half *d_src, size_t size) {
int block_size = 512;
int grid_size = 256;
kukri::_array_half2float_kernel <<<grid_size, block_size>>> (d_dst, d_src, size);
gpuErrChk(cudaGetLastError());
}
__global__ void kukri::_array_half2float_kernel(float *d_dst, half *d_src, size_t size) {
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
size_t step_size = blockDim.x * gridDim.x;
for (; index < size; index += step_size) {
d_dst[index] = __half2float(d_src[index]);
}
}
void kukri::array_float2half_host(half *h_dst, float *h_src, size_t size) {
// Convert an array of float to an array of half, both of which are in host
half *d_dst;
float *d_src;
gpuErrChk(cudaMalloc(&d_dst, size * sizeof(half)));
gpuErrChk(cudaMalloc(&d_src, size * sizeof(float)));
gpuErrChk(cudaMemcpy(d_src, h_src, size * sizeof(float), cudaMemcpyHostToDevice));
kukri::array_float2half_device(d_dst, d_src, size);
cudaMemcpy(h_dst, d_dst, size * sizeof(half), cudaMemcpyDeviceToHost);
gpuErrChk(cudaFree(d_dst));
gpuErrChk(cudaFree(d_src));
}
void kukri::array_float2half_device(half *d_dst, float *d_src, size_t size) {
int block_size = 512;
int grid_size = 256;
kukri::_array_float2half_kernel<<<grid_size, block_size>>>(d_dst, d_src, size);
gpuErrChk(cudaGetLastError());
}
__global__ void kukri::_array_float2half_kernel(half *d_dst, float *d_src, size_t size) {
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
size_t step_size = blockDim.x * gridDim.x;
for (; index < size; index += step_size) {
d_dst[index] = __float2half_rn(d_src[index]);
}
}
|
650ab58e43f9baea6eef9d98871fec73ec4759ee.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include "ATen/hip/HIPContext.h"
#include "open3d/ml/impl/misc/Voxelize.cuh"
#include "open3d/ml/pytorch/TorchHelper.h"
#include "open3d/ml/pytorch/misc/VoxelizeOpKernel.h"
#include "torch/script.h"
using namespace open3d::ml::impl;
// Voxelize a point cloud on the GPU (ROCm/HIP build generated by hipify).
//
// points:            (num_points, NDIM) tensor of coordinates; NDIM in 1..8
//                    is dispatched below, other dims fall through to default.
// voxel_size, points_range_min, points_range_max: per-dimension parameters.
// max_points_per_voxel, max_voxels: capacity limits for the outputs.
// voxel_coords, voxel_point_indices, voxel_point_row_splits: output tensors,
//                    assigned from the allocator after the kernel runs.
template <class T>
void VoxelizeCUDA(const torch::Tensor& points,
                  const torch::Tensor& voxel_size,
                  const torch::Tensor& points_range_min,
                  const torch::Tensor& points_range_max,
                  const int64_t max_points_per_voxel,
                  const int64_t max_voxels,
                  torch::Tensor& voxel_coords,
                  torch::Tensor& voxel_point_indices,
                  torch::Tensor& voxel_point_row_splits) {
    auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    // NOTE(review): the at::cuda namespace survives hipify here, presumably
    // because PyTorch's ROCm build masquerades HIP as CUDA -- confirm.
    auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
    const int texture_alignment = cuda_device_props->textureAlignment;
    VoxelizeOutputAllocator output_allocator(points.device().type(),
                                             points.device().index());
    // Each case calls the impl twice: first with temp_ptr == nullptr to query
    // the required temp-storage size, then again with the allocated buffer.
    switch (points.size(1)) {
#define CASE(NDIM)                                                        \
    case NDIM: {                                                          \
        void* temp_ptr = nullptr;                                         \
        size_t temp_size = 0;                                             \
        VoxelizeCUDA<T, NDIM>(                                            \
                stream, temp_ptr, temp_size, texture_alignment,           \
                points.size(0), points.data_ptr<T>(),                     \
                voxel_size.data_ptr<T>(), points_range_min.data_ptr<T>(), \
                points_range_max.data_ptr<T>(), max_points_per_voxel,     \
                max_voxels, output_allocator);                            \
                                                                          \
        auto temp_tensor =                                                \
                CreateTempTensor(temp_size, points.device(), &temp_ptr);  \
                                                                          \
        VoxelizeCUDA<T, NDIM>(                                            \
                stream, temp_ptr, temp_size, texture_alignment,           \
                points.size(0), points.data_ptr<T>(),                     \
                voxel_size.data_ptr<T>(), points_range_min.data_ptr<T>(), \
                points_range_max.data_ptr<T>(), max_points_per_voxel,     \
                max_voxels, output_allocator);                            \
    } break;
        CASE(1)
        CASE(2)
        CASE(3)
        CASE(4)
        CASE(5)
        CASE(6)
        CASE(7)
        CASE(8)
        default:
            break;  // will be handled by the generic torch function
#undef CASE
    }
    voxel_coords = output_allocator.VoxelCoords();
    voxel_point_indices = output_allocator.VoxelPointIndices();
    voxel_point_row_splits = output_allocator.VoxelPointRowSplits();
}
#define INSTANTIATE(T) \
template void VoxelizeCUDA<T>( \
const torch::Tensor& points, const torch::Tensor& voxel_size, \
const torch::Tensor& points_range_min, \
const torch::Tensor& points_range_max, \
const int64_t max_points_per_voxel, const int64_t max_voxels, \
torch::Tensor& voxel_coords, torch::Tensor& voxel_point_indices, \
torch::Tensor& voxel_point_row_splits);
INSTANTIATE(float)
INSTANTIATE(double)
| 650ab58e43f9baea6eef9d98871fec73ec4759ee.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include "ATen/cuda/CUDAContext.h"
#include "open3d/ml/impl/misc/Voxelize.cuh"
#include "open3d/ml/pytorch/TorchHelper.h"
#include "open3d/ml/pytorch/misc/VoxelizeOpKernel.h"
#include "torch/script.h"
using namespace open3d::ml::impl;
// Voxelize a point cloud on the GPU.
//
// points:            (num_points, NDIM) tensor of coordinates; NDIM in 1..8
//                    is dispatched below, other dims fall through to default.
// voxel_size, points_range_min, points_range_max: per-dimension parameters.
// max_points_per_voxel, max_voxels: capacity limits for the outputs.
// voxel_coords, voxel_point_indices, voxel_point_row_splits: output tensors,
//                    assigned from the allocator after the kernel runs.
template <class T>
void VoxelizeCUDA(const torch::Tensor& points,
                  const torch::Tensor& voxel_size,
                  const torch::Tensor& points_range_min,
                  const torch::Tensor& points_range_max,
                  const int64_t max_points_per_voxel,
                  const int64_t max_voxels,
                  torch::Tensor& voxel_coords,
                  torch::Tensor& voxel_point_indices,
                  torch::Tensor& voxel_point_row_splits) {
    auto stream = at::cuda::getCurrentCUDAStream();
    auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
    const int texture_alignment = cuda_device_props->textureAlignment;
    VoxelizeOutputAllocator output_allocator(points.device().type(),
                                             points.device().index());
    // Each case calls the impl twice: first with temp_ptr == nullptr to query
    // the required temp-storage size, then again with the allocated buffer.
    switch (points.size(1)) {
#define CASE(NDIM)                                                        \
    case NDIM: {                                                          \
        void* temp_ptr = nullptr;                                         \
        size_t temp_size = 0;                                             \
        VoxelizeCUDA<T, NDIM>(                                            \
                stream, temp_ptr, temp_size, texture_alignment,           \
                points.size(0), points.data_ptr<T>(),                     \
                voxel_size.data_ptr<T>(), points_range_min.data_ptr<T>(), \
                points_range_max.data_ptr<T>(), max_points_per_voxel,     \
                max_voxels, output_allocator);                            \
                                                                          \
        auto temp_tensor =                                                \
                CreateTempTensor(temp_size, points.device(), &temp_ptr);  \
                                                                          \
        VoxelizeCUDA<T, NDIM>(                                            \
                stream, temp_ptr, temp_size, texture_alignment,           \
                points.size(0), points.data_ptr<T>(),                     \
                voxel_size.data_ptr<T>(), points_range_min.data_ptr<T>(), \
                points_range_max.data_ptr<T>(), max_points_per_voxel,     \
                max_voxels, output_allocator);                            \
    } break;
        CASE(1)
        CASE(2)
        CASE(3)
        CASE(4)
        CASE(5)
        CASE(6)
        CASE(7)
        CASE(8)
        default:
            break;  // will be handled by the generic torch function
#undef CASE
    }
    voxel_coords = output_allocator.VoxelCoords();
    voxel_point_indices = output_allocator.VoxelPointIndices();
    voxel_point_row_splits = output_allocator.VoxelPointRowSplits();
}
#define INSTANTIATE(T) \
template void VoxelizeCUDA<T>( \
const torch::Tensor& points, const torch::Tensor& voxel_size, \
const torch::Tensor& points_range_min, \
const torch::Tensor& points_range_max, \
const int64_t max_points_per_voxel, const int64_t max_voxels, \
torch::Tensor& voxel_coords, torch::Tensor& voxel_point_indices, \
torch::Tensor& voxel_point_row_splits);
INSTANTIATE(float)
INSTANTIATE(double)
|
37a63fdfa21bd9fb998c83176370d79a1d41862a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file copy_gpu.cu
// @brief Copy and other data operations (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "copy.hpp"
#include "../datacu.hpp"
#include <string.h>
// Set every element of data[0..size) to value.
// One thread per element; the launch uses ceil(size / VL_CUDA_NUM_THREADS)
// blocks, so the bounds check handles the tail.
template<typename type> __global__ void
fill_kernel (type * data, size_t size, type value)
{
  // Fix: the previous "int index" overflowed for arrays with more than 2^31
  // elements and compared signed against the unsigned size_t size.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x ;
  if (index < size) data[index] = value ;
}
// Element-wise dst[i] = mult * src[i] for i in [0, size).
// One thread per element; the bounds check handles the tail block.
template<typename type> __global__ void
copy_kernel (type *dst, type const *src, size_t size, type mult)
{
  // Fix: the previous "int index" overflowed for arrays with more than 2^31
  // elements and compared signed against the unsigned size_t size.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x ;
  if (index < size) dst[index] = mult * src[index] ;
}
namespace vl { namespace impl {

  // GPU backend of the buffer copy/fill primitives.
  template <typename type>
  struct operations<vl::VLDT_GPU, type>
  {
    // Copy numElements values device-to-device, scaling each by mult.
    // mult == 1.0 takes a plain memcpy fast path; otherwise a scaling
    // kernel is launched. Returns VLE_Cuda on a kernel launch error.
    static vl::ErrorCode
    copy(type * dst,
         type const * src,
         size_t numElements,
         double mult)
    {
      if (mult == 1.0) {
        // NOTE(review): the memcpy return value is ignored; a failure here
        // would only surface at a later call -- consider checking it.
        hipMemcpy(dst, src, numElements * sizeof(type), hipMemcpyDeviceToDevice) ;
      } else {
        hipLaunchKernelGGL(( copy_kernel <type>)
          , dim3(divideAndRoundUp(numElements, (size_t)VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS), 0, 0,
          dst, src, numElements, mult) ;
        hipError_t error = hipGetLastError() ;
        if (error != hipSuccess) {
          return VLE_Cuda ;
        }
      }
      return VLE_Success ;
    }

    // Fill numElements values with `value`. Zero fills use hipMemset (value
    // 0 is all-zero bytes for the supported types); other values launch a
    // fill kernel. Returns VLE_Cuda on error.
    static vl::ErrorCode
    fill(type * dst,
         size_t numElements,
         type value)
    {
      hipError_t error;
      if(value == (type)0)
      {
        error = hipMemset(dst, 0, numElements*sizeof(type));
      }
      else
      {
        hipLaunchKernelGGL(( fill_kernel <type>)
          , dim3(divideAndRoundUp(numElements, (size_t)VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS), 0, 0,
          dst, numElements, value) ;
      }
      // The memset result above is overwritten here; presumably a memset
      // failure is also reported by hipGetLastError -- verify.
      error = hipGetLastError() ;
      if (error != hipSuccess) {
        return VLE_Cuda ;
      }
      return VLE_Success ;
    }
  } ;
} }
template struct vl::impl::operations<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::operations<vl::VLDT_GPU, double> ;
#endif
| 37a63fdfa21bd9fb998c83176370d79a1d41862a.cu | // @file copy_gpu.cu
// @brief Copy and other data operations (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "copy.hpp"
#include "../datacu.hpp"
#include <string.h>
// Set every element of data[0..size) to value.
// One thread per element; the launch uses ceil(size / VL_CUDA_NUM_THREADS)
// blocks, so the bounds check handles the tail.
template<typename type> __global__ void
fill_kernel (type * data, size_t size, type value)
{
  // Fix: the previous "int index" overflowed for arrays with more than 2^31
  // elements and compared signed against the unsigned size_t size.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x ;
  if (index < size) data[index] = value ;
}
// Element-wise dst[i] = mult * src[i] for i in [0, size).
// One thread per element; the bounds check handles the tail block.
template<typename type> __global__ void
copy_kernel (type *dst, type const *src, size_t size, type mult)
{
  // Fix: the previous "int index" overflowed for arrays with more than 2^31
  // elements and compared signed against the unsigned size_t size.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x ;
  if (index < size) dst[index] = mult * src[index] ;
}
namespace vl { namespace impl {

  // GPU backend of the buffer copy/fill primitives.
  template <typename type>
  struct operations<vl::VLDT_GPU, type>
  {
    // Copy numElements values device-to-device, scaling each by mult.
    // mult == 1.0 takes a plain memcpy fast path; otherwise a scaling
    // kernel is launched. Returns VLE_Cuda on a kernel launch error.
    static vl::ErrorCode
    copy(type * dst,
         type const * src,
         size_t numElements,
         double mult)
    {
      if (mult == 1.0) {
        // NOTE(review): the memcpy return value is ignored; a failure here
        // would only surface at a later call -- consider checking it.
        cudaMemcpy(dst, src, numElements * sizeof(type), cudaMemcpyDeviceToDevice) ;
      } else {
        copy_kernel <type>
        <<<divideAndRoundUp(numElements, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS>>>
        (dst, src, numElements, mult) ;
        cudaError_t error = cudaGetLastError() ;
        if (error != cudaSuccess) {
          return VLE_Cuda ;
        }
      }
      return VLE_Success ;
    }

    // Fill numElements values with `value`. Zero fills use cudaMemset (value
    // 0 is all-zero bytes for the supported types); other values launch a
    // fill kernel. Returns VLE_Cuda on error.
    static vl::ErrorCode
    fill(type * dst,
         size_t numElements,
         type value)
    {
      cudaError_t error;
      if(value == (type)0)
      {
        error = cudaMemset(dst, 0, numElements*sizeof(type));
      }
      else
      {
        fill_kernel <type>
        <<<divideAndRoundUp(numElements, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS>>>
        (dst, numElements, value) ;
      }
      // The memset result above is overwritten here; presumably a memset
      // failure is also reported by cudaGetLastError -- verify.
      error = cudaGetLastError() ;
      if (error != cudaSuccess) {
        return VLE_Cuda ;
      }
      return VLE_Success ;
    }
  } ;
} }
template struct vl::impl::operations<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::operations<vl::VLDT_GPU, double> ;
#endif
|
079b6f5ac22b645fd6a2c69f416ae74b635c5df9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DataFormats/Math/interface/choleskyInversion.h"
using namespace math::cholesky;
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include <iomanip>
#include <memory>
#include <algorithm>
#include <chrono>
#include <random>
#include <cassert>
#include <iostream>
#include <limits>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
template <typename M, int N>
__global__ void invert(M* mm, int n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
auto& m = mm[i];
printf("before %d %f %f %f\n", N, m(0, 0), m(1, 0), m(1, 1));
invertNN(m, m);
printf("after %d %f %f %f\n", N, m(0, 0), m(1, 0), m(1, 1));
}
template <typename M, int N>
__global__ void invertE(M* mm, int n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
auto& m = mm[i];
printf("before %d %f %f %f\n", N, m(0, 0), m(1, 0), m(1, 1));
m = m.inverse();
printf("after %d %f %f %f\n", N, m(0, 0), m(1, 0), m(1, 1));
}
// generate matrices
template <class M>
void genMatrix(M& m) {
using T = typename std::remove_reference<decltype(m(0, 0))>::type;
int n = M::ColsAtCompileTime;
std::mt19937 eng;
// std::mt19937 eng2;
std::uniform_real_distribution<T> rgen(0., 1.);
// generate first diagonal elemets
for (int i = 0; i < n; ++i) {
double maxVal = i * 10000 / (n - 1) + 1; // max condition is 10^4
m(i, i) = maxVal * rgen(eng);
}
for (int i = 0; i < n; ++i) {
for (int j = 0; j < i; ++j) {
double v = 0.3 * std::sqrt(m(i, i) * m(j, j)); // this makes the matrix pos defined
m(i, j) = v * rgen(eng);
m(j, i) = m(i, j);
}
}
}
template <int DIM>
using MXN = Eigen::Matrix<double, DIM, DIM>;
// Invert one random 6x6 SPD matrix on the CPU (invertNN) and on the GPU via
// both invertNN and Eigen's inverse(), printing entries for comparison.
int main() {
  cms::cudatest::requireDevices();
  constexpr int DIM = 6;
  using M = MXN<DIM>;
  M m;
  genMatrix(m);
  printf("on CPU before %d %f %f %f\n", DIM, m(0, 0), m(1, 0), m(1, 1));
  invertNN(m, m);
  printf("on CPU after %d %f %f %f\n", DIM, m(0, 0), m(1, 0), m(1, 1));
  double* d;
  // Fix: the allocation and copy were unchecked while the synchronizations
  // below used cudaCheck; a failed malloc would only fail inside the kernel.
  cudaCheck(hipMalloc(&d, sizeof(M)));
  cudaCheck(hipMemcpy(d, &m, sizeof(M), hipMemcpyHostToDevice));
  hipLaunchKernelGGL(( invert<M, DIM>), dim3(1), dim3(1), 0, 0, (M*)d, 1);
  cudaCheck(hipDeviceSynchronize());
  hipLaunchKernelGGL(( invertE<M, DIM>), dim3(1), dim3(1), 0, 0, (M*)d, 1);
  cudaCheck(hipDeviceSynchronize());
  cudaCheck(hipFree(d));  // was leaked before
  return 0;
}
| 079b6f5ac22b645fd6a2c69f416ae74b635c5df9.cu | #include "DataFormats/Math/interface/choleskyInversion.h"
using namespace math::cholesky;
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include <iomanip>
#include <memory>
#include <algorithm>
#include <chrono>
#include <random>
#include <cassert>
#include <iostream>
#include <limits>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
template <typename M, int N>
__global__ void invert(M* mm, int n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
auto& m = mm[i];
printf("before %d %f %f %f\n", N, m(0, 0), m(1, 0), m(1, 1));
invertNN(m, m);
printf("after %d %f %f %f\n", N, m(0, 0), m(1, 0), m(1, 1));
}
// Kernel: in-place inversion of n matrices via Eigen's inverse(), printing a
// few entries before and after for manual verification.
template <typename M, int N>
__global__ void invertE(M* mm, int n) {
  auto idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    auto& mat = mm[idx];
    printf("before %d %f %f %f\n", N, mat(0, 0), mat(1, 0), mat(1, 1));
    mat = mat.inverse();
    printf("after %d %f %f %f\n", N, mat(0, 0), mat(1, 0), mat(1, 1));
  }
}
// generate matrices
// Generate a random symmetric positive-definite matrix whose condition
// number can reach ~10^4. Deterministic: the mt19937 engine is
// default-seeded on every call.
template <class M>
void genMatrix(M& m) {
  using T = typename std::remove_reference<decltype(m(0, 0))>::type;
  int n = M::ColsAtCompileTime;
  std::mt19937 eng;
  // std::mt19937 eng2;
  std::uniform_real_distribution<T> rgen(0., 1.);
  // generate first diagonal elemets, spreading magnitudes up to ~1e4
  for (int i = 0; i < n; ++i) {
    // Fix: the original divided by (n - 1) unconditionally, which is a
    // division by zero for 1x1 matrices. For n > 1 behavior is unchanged.
    double maxVal = (n > 1 ? i * 10000 / (n - 1) : 0) + 1;  // max condition is 10^4
    m(i, i) = maxVal * rgen(eng);
  }
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < i; ++j) {
      double v = 0.3 * std::sqrt(m(i, i) * m(j, j));  // this makes the matrix pos defined
      m(i, j) = v * rgen(eng);
      m(j, i) = m(i, j);
    }
  }
}
template <int DIM>
using MXN = Eigen::Matrix<double, DIM, DIM>;
// Invert one random 6x6 SPD matrix on the CPU (invertNN) and on the GPU via
// both invertNN and Eigen's inverse(), printing entries for comparison.
int main() {
  cms::cudatest::requireDevices();
  constexpr int DIM = 6;
  using M = MXN<DIM>;
  M m;
  genMatrix(m);
  printf("on CPU before %d %f %f %f\n", DIM, m(0, 0), m(1, 0), m(1, 1));
  invertNN(m, m);
  printf("on CPU after %d %f %f %f\n", DIM, m(0, 0), m(1, 0), m(1, 1));
  double* d;
  // Fix: the allocation and copy were unchecked while the synchronizations
  // below used cudaCheck; a failed malloc would only fail inside the kernel.
  cudaCheck(cudaMalloc(&d, sizeof(M)));
  cudaCheck(cudaMemcpy(d, &m, sizeof(M), cudaMemcpyHostToDevice));
  invert<M, DIM><<<1, 1>>>((M*)d, 1);
  cudaCheck(cudaDeviceSynchronize());
  invertE<M, DIM><<<1, 1>>>((M*)d, 1);
  cudaCheck(cudaDeviceSynchronize());
  cudaCheck(cudaFree(d));  // was leaked before
  return 0;
}
|
ae87c9f97ab2e82a287925dedb631d95ebf34d1f.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of ComputeStuff copyright (C) 2017 Christopher Dyken.
// Released under the MIT license, please see LICENSE file for details.
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <vector>
#include <Scan.h>
namespace {
// Feature toggles, settable from the command line (see main()).
bool test = true;
bool perf = true;
bool inclusiveScan = true;
bool exclusiveScan = true;
bool offsetTable = true;
bool doCompact = true;
// Report a failed CUDA call (file, line, error name) to stderr and abort.
void logFailure(hipError_t error, const char *file, int line)
{
  std::cerr << file << '@' << line << ": CUDA error: " << hipGetErrorName(error) << std::endl;
  abort();
}
}
#define assertSuccess(a) do { hipError_t rv = (a); if(rv != hipSuccess) logFailure(rv, __FILE__, __LINE__); } while(0)
// Compare result[0..N) element-wise against gold[0..N); print the first
// mismatching pair to stderr and abort.
void assertMatching(const uint32_t* result, const uint32_t* gold, uint32_t N)
{
  for (uint32_t ix = 0; ix < N; ix++) {
    const auto got = result[ix];
    const auto expected = gold[ix];
    if (got != expected) {
      std::cerr << "a=" << got << " != b=" << expected << std::endl;
      abort();
    }
  }
}
// Exercise every enabled scan variant (inclusive, exclusive, offset table,
// compact) for problem size N against CPU-computed gold data, for several
// input patterns (modulo = 1..9). Each variant is tested with disjoint
// input/output buffers and, where supported, in-place. Aborts on mismatch.
void runTest(uint32_t N)
{
  std::vector<uint32_t> offsets(N + 1);
  std::vector<uint32_t> counts(N);
  std::vector<uint32_t> offsetsGold(N + 1);
  std::vector<uint32_t> compact(N);
  std::vector<uint32_t> compactGold(N + 1);
  // Mapped (zero-copy) host word so kernels can write the total sum straight
  // into host-visible memory; sum_d is its device alias.
  uint32_t* sum_h, *sum_d;
  assertSuccess(hipHostMalloc(&sum_h, sizeof(uint32_t), hipHostMallocMapped));
  assertSuccess(hipHostGetDevicePointer(&sum_d, sum_h, 0));
  uint32_t* output_d;
  uint32_t* scratch_d;
  uint32_t* input_d;
  assertSuccess(hipMalloc(&output_d, sizeof(uint32_t)*(N + 1)));
  assertSuccess(hipMalloc(&scratch_d, ComputeStuff::Scan::scratchByteSize(N)));
  assertSuccess(hipMalloc(&input_d, sizeof(uint32_t)*N));
  for (uint32_t modulo = 1; modulo < 10; modulo++) {
    std::cerr << "N=" << N << ", modulo=" << modulo << ", scratch=" << ComputeStuff::Scan::scratchByteSize(N) / sizeof(uint32_t) << std::endl;
    // Set up problem: counts[i] = i % modulo (all ones when modulo == 1),
    // gold offsets are the prefix sums, compactGold lists surviving indices.
    offsetsGold[0] = 0;
    uint32_t compactGold_sum = 0;
    for (uint32_t i = 0; i < N; i++) {
      counts[i] = modulo == 1 ? 1 : (i % modulo);
      offsetsGold[i + 1] = offsetsGold[i] + counts[i];
      compact[i] = modulo == 1 ? 1 : (i % modulo == 0 ? i + 1 : 0); // Any nonzero number flags surviving element.
      if (compact[i] != 0) {
        compactGold[compactGold_sum++] = i;
      }
    }
    assertSuccess(hipMemcpy(input_d, counts.data(), sizeof(uint32_t)*N, hipMemcpyHostToDevice));
    // Inclusive scan
    // --------------
    if (inclusiveScan) {
      // Disjoint input and output; output is poisoned with ~0 first.
      assertSuccess(hipMemset(output_d, ~0, N * sizeof(uint32_t)));
      ComputeStuff::Scan::inclusiveScan(output_d, scratch_d, input_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data() + 1, N);
    }
    // Exclusive scan
    // --------------
    if (exclusiveScan) {
      // Disjoint input and output
      assertSuccess(hipMemset(output_d, ~0, N * sizeof(uint32_t)));
      ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, input_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N);
      // In-place
      assertSuccess(hipMemcpy(output_d, input_d, sizeof(uint32_t)*N, hipMemcpyDeviceToDevice));
      ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, output_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N);
    }
    // Offset table
    // ------------
    if (offsetTable) {
      // Offset without sum, disjoint input and output
      assertSuccess(hipMemset(output_d, ~0, (N + 1) * sizeof(uint32_t)));
      ComputeStuff::Scan::calcOffsets(output_d, scratch_d, input_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      // Offset without sum, in-place
      assertSuccess(hipMemcpy(output_d, input_d, sizeof(uint32_t)*N, hipMemcpyDeviceToDevice));
      assertSuccess(hipMemset(output_d + N, ~0, sizeof(uint32_t)));
      ComputeStuff::Scan::calcOffsets(output_d, scratch_d, output_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      // Offset with sum, disjoint input and output
      assertSuccess(hipMemset(output_d, ~0, (N + 1) * sizeof(uint32_t)));
      *sum_h = ~0;
      ComputeStuff::Scan::calcOffsets(output_d, sum_d, scratch_d, input_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      if (*((volatile uint32_t*)sum_h) != offsetsGold.back()) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
      // Offset with sum, in-place
      assertSuccess(hipMemcpy(output_d, input_d, sizeof(uint32_t)*N, hipMemcpyDeviceToDevice));
      assertSuccess(hipMemset(output_d + N, ~0, sizeof(uint32_t)));
      *sum_h = ~0;
      ComputeStuff::Scan::calcOffsets(output_d, sum_d, scratch_d, output_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      if (*((volatile uint32_t*)sum_h) != offsetsGold.back()) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
    }
    // Compact
    // -------
    if (doCompact) {
      *sum_h = 0;
      assertSuccess(hipMemcpy(input_d, compact.data(), sizeof(uint32_t)*N, hipMemcpyHostToDevice));
      ComputeStuff::Scan::compact(output_d, sum_d, scratch_d, input_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), compactGold.data(), compactGold_sum);
      if (*((volatile uint32_t*)sum_h) != compactGold_sum) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
      // In-place
      *sum_h = 0;
      assertSuccess(hipMemcpy(output_d, input_d, sizeof(uint32_t)*N, hipMemcpyDeviceToDevice));
      ComputeStuff::Scan::compact(output_d, sum_d, scratch_d, output_d, N);
      assertSuccess(hipStreamSynchronize(0));
      assertSuccess(hipGetLastError());
      assertSuccess(hipMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, hipMemcpyDeviceToHost));
      assertMatching(offsets.data(), compactGold.data(), compactGold_sum);
      if (*((volatile uint32_t*)sum_h) != compactGold_sum) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
    }
  }
  assertSuccess(hipFree(input_d));
  assertSuccess(hipFree(scratch_d));
  assertSuccess(hipFree(output_d));
  assertSuccess(hipHostFree(sum_h));
}
// Time thrust::exclusive_scan against ComputeStuff's exclusiveScan for
// problem size N (10 warm-up iterations, 50 timed iterations each, measured
// with events on one stream) and print a markdown table row with the average
// per-run times and their ratio.
void runPerf(uint32_t N)
{
  hipStream_t stream;
  assertSuccess(hipStreamCreate(&stream));
  hipEvent_t startA, stopA, startB, stopB;
  assertSuccess(hipEventCreate(&startA));
  assertSuccess(hipEventCreate(&startB));
  assertSuccess(hipEventCreate(&stopA));
  assertSuccess(hipEventCreate(&stopB));
  thrust::host_vector<uint32_t> in_h(N);
  std::vector<uint32_t> in_s(N);
  for (size_t i = 0; i < N; i++) {
    in_h[i] = in_s[i] = i % 3;
  }
  uint32_t* output_d;
  uint32_t* scratch_d;
  uint32_t* input_d;
  assertSuccess(hipMalloc(&output_d, sizeof(uint32_t)*(N + 1)));
  assertSuccess(hipMalloc(&scratch_d, ComputeStuff::Scan::scratchByteSize(N)));
  assertSuccess(hipMalloc(&input_d, sizeof(uint32_t)*N));
  assertSuccess(hipMemcpy(input_d, in_s.data(), sizeof(uint32_t)*N, hipMemcpyHostToDevice));
  thrust::device_vector<uint32_t> in_d = in_h;
  thrust::device_vector<uint32_t> out_d(N);
  // Run thrust::exclusive_scan
  for (uint32_t i = 0; i < 10; i++) { // warm-up
    thrust::exclusive_scan(thrust::hip::par.on(stream), in_d.begin(), in_d.end(), out_d.begin());
  }
  hipEventRecord(startA, stream);
  for (uint32_t i = 0; i < 50; i++) { // perf-run
    thrust::exclusive_scan(thrust::hip::par.on(stream), in_d.begin(), in_d.end(), out_d.begin());
  }
  hipEventRecord(stopA, stream);
  // Run ComputeStuff scan
  for (uint32_t i = 0; i < 10; i++) { // warm-up
    ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, input_d, N, stream);
  }
  hipEventRecord(startB, stream);
  for (uint32_t i = 0; i < 50; i++) { // perf-run
    ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, input_d, N, stream);
  }
  hipEventRecord(stopB, stream);
  hipEventSynchronize(stopB);
  float elapsedA, elapsedB;
  assertSuccess(hipEventElapsedTime(&elapsedA, startA, stopA));
  assertSuccess(hipEventElapsedTime(&elapsedB, startB, stopB));
  std::cerr << "|" << N << "|" << (elapsedA / 50.0) << "ms|" << (elapsedB / 50.0) << "ms|" << (elapsedB / elapsedA) << "|" << std::endl;
  assertSuccess(hipFree(input_d));
  assertSuccess(hipFree(scratch_d));
  assertSuccess(hipFree(output_d));
  assertSuccess(hipStreamDestroy(stream));
  assertSuccess(hipEventDestroy(startA));
  assertSuccess(hipEventDestroy(startB));
  assertSuccess(hipEventDestroy(stopA));
  assertSuccess(hipEventDestroy(stopB));
}
// Parse command-line toggles, then run the correctness and/or perf suites
// over a range of problem sizes bounded by a tenth of device memory.
int main(int argc, char** argv)
{
  for (int i = 1; i < argc; i++) {
    // Bug fix: strcmp returns 0 on equality, so the original
    // "if (strcmp(...))" tests were inverted -- "--perf" itself fell through
    // to the "--no-perf" branch and any unrecognized flag enabled perf.
    // Compare against 0 explicitly.
    if (strcmp("--perf", argv[i]) == 0) {
      perf = true;
    }
    else if (strcmp("--no-perf", argv[i]) == 0) {
      perf = false;
    }
    else if (strcmp("--test", argv[i]) == 0) {
      test = true;
    }
    else if (strcmp("--no-test", argv[i]) == 0) {
      test = false;
    }
    else if (strcmp("--inclusive-scan", argv[i]) == 0) {
      inclusiveScan = true;
    }
    else if (strcmp("--no-inclusive-scan", argv[i]) == 0) {
      inclusiveScan = false;
    }
    else if (strcmp("--exclusive-scan", argv[i]) == 0) {
      exclusiveScan = true;
    }
    else if (strcmp("--no-exclusive-scan", argv[i]) == 0) {
      exclusiveScan = false;
    }
    else if (strcmp("--offset-table", argv[i]) == 0) {
      offsetTable = true;
    }
    else if (strcmp("--no-offset-table", argv[i]) == 0) {
      offsetTable = false;
    }
  }
  std::cerr << "test=" << test << ", perf=" << perf << std::endl;
  std::cerr << "inclusive-scan=" << inclusiveScan << ", exclusive-scan=" << exclusiveScan << ", offset-table=" << offsetTable << std::endl;
  assertSuccess(hipSetDevice(0));
  hipDeviceProp_t props;
  assertSuccess(hipGetDeviceProperties(&props, 0));
  if (props.major < 3) {
    std::cerr << "Compute capability 3.0 is minimum, device " << props.name << " has compute capability " << props.major << "." << props.minor << std::endl;
    return -1;
  }
  if (test) {
    // Grow N roughly geometrically to cover many size classes quickly.
    for (uint64_t N = 1; N < (uint64_t)(props.totalGlobalMem / 10); N = (N == 0 ? 1 : 7 * N + N / 3))
    {
      runTest(static_cast<uint32_t>(N));
    }
  }
  if (perf) {
    std::cerr << "| N | thrust | ComputeStuff | ratio |" << std::endl;
    std::cerr << "|---|--------|--------------|-------|" << std::endl;
    for (uint64_t N = 1; N < (uint64_t)(props.totalGlobalMem / 10); N = 3 * N + N / 3) {
      runPerf(static_cast<uint32_t>(N));
    }
  }
  return 0;
}
| ae87c9f97ab2e82a287925dedb631d95ebf34d1f.cu | // This file is part of ComputeStuff copyright (C) 2017 Christopher Dyken.
// Released under the MIT license, please see LICENSE file for details.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <vector>
#include <Scan.h>
namespace {
// Feature toggles, settable from the command line (see main()).
bool test = true;
bool perf = true;
bool inclusiveScan = true;
bool exclusiveScan = true;
bool offsetTable = true;
bool doCompact = true;
// Report a failed CUDA call (file, line, error name) to stderr and abort.
void logFailure(cudaError_t error, const char *file, int line)
{
  std::cerr << file << '@' << line << ": CUDA error: " << cudaGetErrorName(error) << std::endl;
  abort();
}
}
#define assertSuccess(a) do { cudaError_t rv = (a); if(rv != cudaSuccess) logFailure(rv, __FILE__, __LINE__); } while(0)
// Compare result[0..N) element-wise against gold[0..N); print the first
// mismatching pair to stderr and abort.
void assertMatching(const uint32_t* result, const uint32_t* gold, uint32_t N)
{
  for (uint32_t ix = 0; ix < N; ix++) {
    const auto got = result[ix];
    const auto expected = gold[ix];
    if (got != expected) {
      std::cerr << "a=" << got << " != b=" << expected << std::endl;
      abort();
    }
  }
}
// Exercise every enabled scan variant (inclusive, exclusive, offset table,
// compact) for problem size N against CPU-computed gold data, for several
// input patterns (modulo = 1..9). Each variant is tested with disjoint
// input/output buffers and, where supported, in-place. Aborts on mismatch.
void runTest(uint32_t N)
{
  std::vector<uint32_t> offsets(N + 1);
  std::vector<uint32_t> counts(N);
  std::vector<uint32_t> offsetsGold(N + 1);
  std::vector<uint32_t> compact(N);
  std::vector<uint32_t> compactGold(N + 1);
  // Mapped (zero-copy) host word so kernels can write the total sum straight
  // into host-visible memory; sum_d is its device alias.
  uint32_t* sum_h, *sum_d;
  assertSuccess(cudaHostAlloc(&sum_h, sizeof(uint32_t), cudaHostAllocMapped));
  assertSuccess(cudaHostGetDevicePointer(&sum_d, sum_h, 0));
  uint32_t* output_d;
  uint32_t* scratch_d;
  uint32_t* input_d;
  assertSuccess(cudaMalloc(&output_d, sizeof(uint32_t)*(N + 1)));
  assertSuccess(cudaMalloc(&scratch_d, ComputeStuff::Scan::scratchByteSize(N)));
  assertSuccess(cudaMalloc(&input_d, sizeof(uint32_t)*N));
  for (uint32_t modulo = 1; modulo < 10; modulo++) {
    std::cerr << "N=" << N << ", modulo=" << modulo << ", scratch=" << ComputeStuff::Scan::scratchByteSize(N) / sizeof(uint32_t) << std::endl;
    // Set up problem: counts[i] = i % modulo (all ones when modulo == 1),
    // gold offsets are the prefix sums, compactGold lists surviving indices.
    offsetsGold[0] = 0;
    uint32_t compactGold_sum = 0;
    for (uint32_t i = 0; i < N; i++) {
      counts[i] = modulo == 1 ? 1 : (i % modulo);
      offsetsGold[i + 1] = offsetsGold[i] + counts[i];
      compact[i] = modulo == 1 ? 1 : (i % modulo == 0 ? i + 1 : 0); // Any nonzero number flags surviving element.
      if (compact[i] != 0) {
        compactGold[compactGold_sum++] = i;
      }
    }
    assertSuccess(cudaMemcpy(input_d, counts.data(), sizeof(uint32_t)*N, cudaMemcpyHostToDevice));
    // Inclusive scan
    // --------------
    if (inclusiveScan) {
      // Disjoint input and output; output is poisoned with ~0 first.
      assertSuccess(cudaMemset(output_d, ~0, N * sizeof(uint32_t)));
      ComputeStuff::Scan::inclusiveScan(output_d, scratch_d, input_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data() + 1, N);
    }
    // Exclusive scan
    // --------------
    if (exclusiveScan) {
      // Disjoint input and output
      assertSuccess(cudaMemset(output_d, ~0, N * sizeof(uint32_t)));
      ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, input_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N);
      // In-place
      assertSuccess(cudaMemcpy(output_d, input_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToDevice));
      ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, output_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N);
    }
    // Offset table
    // ------------
    if (offsetTable) {
      // Offset without sum, disjoint input and output
      assertSuccess(cudaMemset(output_d, ~0, (N + 1) * sizeof(uint32_t)));
      ComputeStuff::Scan::calcOffsets(output_d, scratch_d, input_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      // Offset without sum, in-place
      assertSuccess(cudaMemcpy(output_d, input_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToDevice));
      assertSuccess(cudaMemset(output_d + N, ~0, sizeof(uint32_t)));
      ComputeStuff::Scan::calcOffsets(output_d, scratch_d, output_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      // Offset with sum, disjoint input and output
      assertSuccess(cudaMemset(output_d, ~0, (N + 1) * sizeof(uint32_t)));
      *sum_h = ~0;
      ComputeStuff::Scan::calcOffsets(output_d, sum_d, scratch_d, input_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      if (*((volatile uint32_t*)sum_h) != offsetsGold.back()) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
      // Offset with sum, in-place
      assertSuccess(cudaMemcpy(output_d, input_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToDevice));
      assertSuccess(cudaMemset(output_d + N, ~0, sizeof(uint32_t)));
      *sum_h = ~0;
      ComputeStuff::Scan::calcOffsets(output_d, sum_d, scratch_d, output_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*(N + 1), cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), offsetsGold.data(), N + 1);
      if (*((volatile uint32_t*)sum_h) != offsetsGold.back()) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
    }
    // Compact
    // -------
    if (doCompact) {
      *sum_h = 0;
      assertSuccess(cudaMemcpy(input_d, compact.data(), sizeof(uint32_t)*N, cudaMemcpyHostToDevice));
      ComputeStuff::Scan::compact(output_d, sum_d, scratch_d, input_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), compactGold.data(), compactGold_sum);
      if (*((volatile uint32_t*)sum_h) != compactGold_sum) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
      // In-place
      *sum_h = 0;
      assertSuccess(cudaMemcpy(output_d, input_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToDevice));
      ComputeStuff::Scan::compact(output_d, sum_d, scratch_d, output_d, N);
      assertSuccess(cudaStreamSynchronize(0));
      assertSuccess(cudaGetLastError());
      assertSuccess(cudaMemcpy(offsets.data(), output_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost));
      assertMatching(offsets.data(), compactGold.data(), compactGold_sum);
      if (*((volatile uint32_t*)sum_h) != compactGold_sum) {
        std::cerr << "Wrong sum." << std::endl;
        abort();
      }
    }
  }
  assertSuccess(cudaFree(input_d));
  assertSuccess(cudaFree(scratch_d));
  assertSuccess(cudaFree(output_d));
  assertSuccess(cudaFreeHost(sum_h));
}
// Benchmarks ComputeStuff::Scan::exclusiveScan against thrust::exclusive_scan
// on N elements of input pattern (i % 3), and writes one markdown table row
//   | N | thrust ms | ComputeStuff ms | ratio |
// to stderr. Each implementation gets 10 warm-up iterations followed by a
// timed run of 50 iterations on a dedicated stream; times reported are the
// per-iteration average measured with CUDA events.
void runPerf(uint32_t N)
{
  // Dedicated stream so the timed work does not serialize against the
  // legacy default stream.
  cudaStream_t stream;
  assertSuccess(cudaStreamCreate(&stream));

  cudaEvent_t startA, stopA, startB, stopB;
  assertSuccess(cudaEventCreate(&startA));
  assertSuccess(cudaEventCreate(&startB));
  assertSuccess(cudaEventCreate(&stopA));
  assertSuccess(cudaEventCreate(&stopB));

  // Same input values for both implementations.
  thrust::host_vector<uint32_t> in_h(N);
  std::vector<uint32_t> in_s(N);
  for (size_t i = 0; i < N; i++) {
    in_h[i] = in_s[i] = i % 3;
  }

  uint32_t* output_d;
  uint32_t* scratch_d;
  uint32_t* input_d;
  assertSuccess(cudaMalloc(&output_d, sizeof(uint32_t)*(N + 1)));
  assertSuccess(cudaMalloc(&scratch_d, ComputeStuff::Scan::scratchByteSize(N)));
  assertSuccess(cudaMalloc(&input_d, sizeof(uint32_t)*N));
  assertSuccess(cudaMemcpy(input_d, in_s.data(), sizeof(uint32_t)*N, cudaMemcpyHostToDevice));

  thrust::device_vector<uint32_t> in_d = in_h;
  thrust::device_vector<uint32_t> out_d(N);

  // Run thrust::exclusive_scan
  for (uint32_t i = 0; i < 10; i++) { // warm-up
    thrust::exclusive_scan(thrust::cuda::par.on(stream), in_d.begin(), in_d.end(), out_d.begin());
  }
  // NOTE: event-API calls are checked with assertSuccess like every other
  // CUDA call in this file; a sticky error from the loops above would
  // otherwise surface confusingly at cudaEventElapsedTime.
  assertSuccess(cudaEventRecord(startA, stream));
  for (uint32_t i = 0; i < 50; i++) { // perf-run
    thrust::exclusive_scan(thrust::cuda::par.on(stream), in_d.begin(), in_d.end(), out_d.begin());
  }
  assertSuccess(cudaEventRecord(stopA, stream));

  // Run ComputeStuff scan
  for (uint32_t i = 0; i < 10; i++) { // warm-up
    ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, input_d, N, stream);
  }
  assertSuccess(cudaEventRecord(startB, stream));
  for (uint32_t i = 0; i < 50; i++) { // perf-run
    ComputeStuff::Scan::exclusiveScan(output_d, scratch_d, input_d, N, stream);
  }
  assertSuccess(cudaEventRecord(stopB, stream));

  // stopB is the last event recorded on the stream, so waiting on it also
  // guarantees startA/stopA/startB have completed.
  assertSuccess(cudaEventSynchronize(stopB));

  float elapsedA, elapsedB;
  assertSuccess(cudaEventElapsedTime(&elapsedA, startA, stopA));
  assertSuccess(cudaEventElapsedTime(&elapsedB, startB, stopB));
  std::cerr << "|" << N << "|" << (elapsedA / 50.0) << "ms|" << (elapsedB / 50.0) << "ms|" << (elapsedB / elapsedA) << "|" << std::endl;

  assertSuccess(cudaFree(input_d));
  assertSuccess(cudaFree(scratch_d));
  assertSuccess(cudaFree(output_d));
  assertSuccess(cudaStreamDestroy(stream));
  assertSuccess(cudaEventDestroy(startA));
  assertSuccess(cudaEventDestroy(startB));
  assertSuccess(cudaEventDestroy(stopA));
  assertSuccess(cudaEventDestroy(stopB));
}
// Entry point: parses --[no-]perf/test/inclusive-scan/exclusive-scan/offset-table
// flags into the corresponding file-level globals, verifies the device has
// compute capability >= 3.0, then runs correctness tests (runTest) and/or the
// thrust-vs-ComputeStuff benchmark (runPerf) over a range of problem sizes.
// Returns 0 on success, -1 if the device is too old.
int main(int argc, char** argv)
{
  for (int i = 1; i < argc; i++) {
    // BUGFIX: strcmp returns 0 on a match, so each test must compare == 0.
    // The previous code used the raw (non-zero on mismatch) return value,
    // which made the first branch fire for every argument except "--perf"
    // and left all other flags unreachable.
    if (strcmp("--perf", argv[i]) == 0) {
      perf = true;
    }
    else if (strcmp("--no-perf", argv[i]) == 0) {
      perf = false;
    }
    else if (strcmp("--test", argv[i]) == 0) {
      test = true;
    }
    else if (strcmp("--no-test", argv[i]) == 0) {
      test = false;
    }
    else if (strcmp("--inclusive-scan", argv[i]) == 0) {
      inclusiveScan = true;
    }
    else if (strcmp("--no-inclusive-scan", argv[i]) == 0) {
      inclusiveScan = false;
    }
    else if (strcmp("--exclusive-scan", argv[i]) == 0) {
      exclusiveScan = true;
    }
    else if (strcmp("--no-exclusive-scan", argv[i]) == 0) {
      exclusiveScan = false;
    }
    else if (strcmp("--offset-table", argv[i]) == 0) {
      offsetTable = true;
    }
    else if (strcmp("--no-offset-table", argv[i]) == 0) {
      offsetTable = false;
    }
  }
  std::cerr << "test=" << test << ", perf=" << perf << std::endl;
  std::cerr << "inclusive-scan=" << inclusiveScan << ", exclusive-scan=" << exclusiveScan << ", offset-table=" << offsetTable << std::endl;

  assertSuccess(cudaSetDevice(0));
  cudaDeviceProp props;
  assertSuccess(cudaGetDeviceProperties(&props, 0));
  if (props.major < 3) {
    std::cerr << "Compute capability 3.0 is minimum, device " << props.name << " has compute capability " << props.major << "." << props.minor << std::endl;
    return -1;
  }

  if (test) {
    // Sweep problem sizes up to a tenth of device memory; the irregular
    // growth factor (7N + N/3) exercises sizes that are not multiples of
    // the block size.
    for (uint64_t N = 1; N < (uint64_t)(props.totalGlobalMem / 10); N = (N == 0 ? 1 : 7 * N + N / 3))
    {
      runTest(static_cast<uint32_t>(N));
    }
  }
  if (perf) {
    std::cerr << "| N | thrust | ComputeStuff | ratio |" << std::endl;
    std::cerr << "|---|--------|--------------|-------|" << std::endl;
    for (uint64_t N = 1; N < (uint64_t)(props.totalGlobalMem / 10); N = 3 * N + N / 3) {
      runPerf(static_cast<uint32_t>(N));
    }
  }
  return 0;
}
|
/* (Non-source residue removed from compilation: the trailing lines
   "Subsets and Splits" / "No community queries yet" / "The top public SQL
   queries from the community will appear here once available." are dataset
   web-page UI text accidentally appended to this file and are not part of
   the program.) */