hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
350c81ea12ff1c96871f7d65570e9337caa2206b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 10000;
int *a;
size_t size = N * sizeof(int);
hipMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 64;
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
hipFree(a);
}
| 350c81ea12ff1c96871f7d65570e9337caa2206b.cu | #include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 10000;
int *a;
size_t size = N * sizeof(int);
cudaMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 64;
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
cudaFree(a);
}
|
5f07c31f9697cf7d14b7f84d68f4007e2277a656.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/time.h>
#include <unistd.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#define BLOCKS 12
#define BLOCKSIZE 544
#define BSize 12
#define QSize 17
#define SSize 128
#define DATASIZE 512
#define THREADS 512
#define N (DATASIZE*DATASIZE)
#define tasks 36
#define imin(a, b) (a<=b?a:b)
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
struct kernel_para{
volatile int *A, *B;
volatile int *C;
volatile int size;
volatile int block;
volatile int thread;
volatile int warp;
volatile int req;
volatile int funcId;
volatile int taskId;
volatile int doneHost;
int doneGPU;
};
struct kernel_para_GPU{
int warpId;
int baseId;
int taskId;
};
int ipow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp & 1)
result *= base;
exp >>= 1;
base *= base;
}
return result;
}
extern __global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec);
int main(int argc, char** argv){
double startTime, endTime;
int totalWarps = ((BLOCKSIZE*BLOCKS)/32);
hipSetDevice(0);
hipDeviceReset();
hipStream_t s2;
hipStream_t s3;
checkCudaErrors(hipStreamCreate(&s2));
checkCudaErrors(hipStreamCreate(&s3));
// To interrupt the runtime
int *done, *doneDev;
int *exec, *execDev;
int *totalExecTasks, *totalExecTasksDev;
// int *totalScheTasks, *totalScheTasksDev;
struct kernel_para_GPU *warpPoolDev;
struct kernel_para *taskArgs, *taskArgsDev;
struct kernel_para *taskparaBuffer, *taskparaBufferDev;
// checkCudaErrors(hipHostMalloc(&totalScheTasks, sizeof(int), hipHostMallocDefault));
// checkCudaErrors(hipMalloc(&totalScheTasksDev, sizeof(int)));
checkCudaErrors(hipHostMalloc(&exec, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&execDev, sizeof(int)));
// done flag
checkCudaErrors(hipHostMalloc(&done, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&doneDev, sizeof(int)));
checkCudaErrors(hipMalloc(&warpPoolDev, totalWarps*sizeof(struct kernel_para_GPU)));
checkCudaErrors(hipHostMalloc(&totalExecTasks, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&totalExecTasksDev, sizeof(int)));
checkCudaErrors(hipHostMalloc(&taskArgs, tasks*sizeof(struct kernel_para), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&taskArgsDev, tasks*sizeof(struct kernel_para)));
checkCudaErrors(hipHostMalloc(&taskparaBuffer, BSize*SSize*sizeof(struct kernel_para), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&taskparaBufferDev, BSize*SSize*sizeof(struct kernel_para)));
// input data
int *aDev[tasks], *bDev[tasks], *cDev[tasks];
int *a[tasks], *b[tasks], *c[tasks];
for(int i=0; i<tasks; i++) {
checkCudaErrors(hipMalloc(&aDev[i], N*sizeof(int)));
checkCudaErrors(hipMalloc(&bDev[i], N*sizeof(int)));
checkCudaErrors(hipMalloc(&cDev[i], N*sizeof(int)));
checkCudaErrors(hipHostMalloc(&a[i], N*sizeof(int), NULL));
checkCudaErrors(hipHostMalloc(&b[i], N*sizeof(int), NULL));
checkCudaErrors(hipHostMalloc(&c[i], N*sizeof(int), NULL));
}
for(int i = 0; i < tasks; i++){
for(int j=0; j<N; j++) {
a[i][j]= (i%32)+1;
b[i][j]= (i%32)+1;
c[i][j] = 0;
}
}
for(int i = 0; i < tasks; i++){
checkCudaErrors(hipMemcpyAsync(aDev[i], a[i] , N*sizeof(int),hipMemcpyHostToDevice, s3));
checkCudaErrors(hipMemcpyAsync(bDev[i], b[i] , N*sizeof(int),hipMemcpyHostToDevice, s3));
checkCudaErrors(hipMemcpyAsync(cDev[i], c[i] , N*sizeof(int),hipMemcpyHostToDevice, s3));
}
for(int i = 0; i < tasks; i++){
// init. task para
taskArgs[i].A = aDev[i];
taskArgs[i].B = bDev[i];
taskArgs[i].C = cDev[i];
taskArgs[i].size = DATASIZE;
taskArgs[i].block = 1;
taskArgs[i].thread = THREADS;
taskArgs[i].warp = THREADS/32;
taskArgs[i].funcId = 1;
// taskArgs[i].taskId = i;
taskArgs[i].req = 1;
taskArgs[i].doneHost = 1;
taskArgs[i].doneGPU = THREADS/32;
// printf("Host:%p\n", taskArgs[i].A);
}
for(int i = 0; i < (BSize*SSize); i++){
taskparaBuffer[i].req = 0;
}
*done = 0;
*exec = 0;
//*totalScheTasks = 0;
*totalExecTasks = 0;
checkCudaErrors(hipMemcpyAsync(execDev, exec, sizeof(int), hipMemcpyHostToDevice, s3));
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, s3));
checkCudaErrors(hipMemcpyAsync(totalExecTasksDev, totalExecTasks, sizeof(int), hipMemcpyHostToDevice, s3));
// checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, s3));
checkCudaErrors(hipMemcpyAsync(taskparaBufferDev, taskparaBuffer, BSize*SSize*sizeof(struct kernel_para), hipMemcpyHostToDevice, s3));
checkCudaErrors(hipMemcpyAsync(taskArgsDev, taskArgs, tasks*sizeof(struct kernel_para), hipMemcpyHostToDevice, s3));
checkCudaErrors(hipStreamSynchronize(s3));
hipLaunchKernelGGL(( deviceRT), dim3(BLOCKS),dim3(BLOCKSIZE),0, s2, doneDev, totalExecTasksDev, warpPoolDev, taskparaBufferDev, taskArgsDev, execDev);
#if 1
// para delivery
int j = 0;
int c1 = 0;
startTime = my_timer();
while(j < tasks){
for(int i = 0; i < (SSize*BSize); i++){
if(taskparaBuffer[i].req == 0){
taskparaBuffer[i].warp = THREADS/32;
taskparaBuffer[i].req = 1;
taskparaBuffer[i].taskId = j;
// printf("Host:%d, %d\n", i*BSize+j, t);
checkCudaErrors(hipMemcpyAsync(&taskparaBufferDev[i], &taskparaBuffer[i], sizeof(struct kernel_para), hipMemcpyHostToDevice, s3));
j++;
if(j == tasks) break;
}
}
if(j == tasks) break;
checkCudaErrors(hipMemcpyAsync(taskparaBuffer, taskparaBufferDev, BSize*SSize*sizeof(struct kernel_para), hipMemcpyDeviceToHost, s3));
checkCudaErrors(hipStreamSynchronize(s3));
c1++;
}
endTime = my_timer();
printf("Elapsed Time1:%lf sec.\n", (endTime-startTime));
printf("Iteration1:%d\n", c1);
#endif
#if 1
int all = 0;
startTime = my_timer();
while(*totalExecTasks < tasks){
checkCudaErrors(hipMemcpyAsync(totalExecTasks, totalExecTasksDev, sizeof(int), hipMemcpyDeviceToHost, s3));
checkCudaErrors(hipStreamSynchronize(s3));
all++;
//if(all > 40000) break;
}
endTime = my_timer();
printf("Elapsed Time2:%lf sec.\n", (endTime-startTime));
printf("Iterations:%d, %d\n", all, *totalExecTasks);
#endif
#if 1
*exec = 1;
checkCudaErrors(hipMemcpyAsync(execDev, exec, sizeof(int), hipMemcpyHostToDevice, s3));
*done = 1;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, s3));
#endif
#if 1
// copy back results of tasks
for(int i=0; i<tasks; i++) {
checkCudaErrors(hipMemcpyAsync (c[i], cDev[i] , N*sizeof(int),hipMemcpyDeviceToHost, s3));
}
checkCudaErrors(hipStreamSynchronize(s3));
#endif
#if 1
// verification
for (int i = 0; i < tasks; i++){
for(int j = 0; j < N; j++){
if(c[i][j] != DATASIZE*ipow((i%32)+1, 2)){
printf("Error:%d, %d\n", i, c[i][j]);
break;
}
}
}
#endif
for(int i = 0; i < tasks; i++){
checkCudaErrors(hipHostFree(a[i]));
checkCudaErrors(hipHostFree(b[i]));
checkCudaErrors(hipHostFree(c[i]));
checkCudaErrors(hipFree(aDev[i]));
checkCudaErrors(hipFree(bDev[i]));
checkCudaErrors(hipFree(cDev[i]));
}
checkCudaErrors(hipStreamDestroy(s2));
hipStreamDestroy(s3);
hipHostFree(done);
hipHostFree(exec);
hipHostFree(totalExecTasks);
hipHostFree(taskArgs);
hipHostFree(taskparaBuffer);
// hipHostFree(totalScheTasks);
hipFree(totalExecTasksDev);
// hipFree(totalScheTasksDev);
hipFree(warpPoolDev);
hipFree(doneDev);
hipFree(execDev);
hipFree(taskArgsDev);
hipFree(taskparaBufferDev);
return 0;
}
| 5f07c31f9697cf7d14b7f84d68f4007e2277a656.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/time.h>
#include <unistd.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#define BLOCKS 12
#define BLOCKSIZE 544
#define BSize 12
#define QSize 17
#define SSize 128
#define DATASIZE 512
#define THREADS 512
#define N (DATASIZE*DATASIZE)
#define tasks 36
#define imin(a, b) (a<=b?a:b)
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
struct kernel_para{
volatile int *A, *B;
volatile int *C;
volatile int size;
volatile int block;
volatile int thread;
volatile int warp;
volatile int req;
volatile int funcId;
volatile int taskId;
volatile int doneHost;
int doneGPU;
};
struct kernel_para_GPU{
int warpId;
int baseId;
int taskId;
};
int ipow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp & 1)
result *= base;
exp >>= 1;
base *= base;
}
return result;
}
extern __global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec);
int main(int argc, char** argv){
double startTime, endTime;
int totalWarps = ((BLOCKSIZE*BLOCKS)/32);
cudaSetDevice(0);
cudaDeviceReset();
cudaStream_t s2;
cudaStream_t s3;
checkCudaErrors(cudaStreamCreate(&s2));
checkCudaErrors(cudaStreamCreate(&s3));
// To interrupt the runtime
int *done, *doneDev;
int *exec, *execDev;
int *totalExecTasks, *totalExecTasksDev;
// int *totalScheTasks, *totalScheTasksDev;
struct kernel_para_GPU *warpPoolDev;
struct kernel_para *taskArgs, *taskArgsDev;
struct kernel_para *taskparaBuffer, *taskparaBufferDev;
// checkCudaErrors(cudaHostAlloc(&totalScheTasks, sizeof(int), cudaHostAllocDefault));
// checkCudaErrors(cudaMalloc(&totalScheTasksDev, sizeof(int)));
checkCudaErrors(cudaHostAlloc(&exec, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&execDev, sizeof(int)));
// done flag
checkCudaErrors(cudaHostAlloc(&done, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&doneDev, sizeof(int)));
checkCudaErrors(cudaMalloc(&warpPoolDev, totalWarps*sizeof(struct kernel_para_GPU)));
checkCudaErrors(cudaHostAlloc(&totalExecTasks, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&totalExecTasksDev, sizeof(int)));
checkCudaErrors(cudaHostAlloc(&taskArgs, tasks*sizeof(struct kernel_para), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&taskArgsDev, tasks*sizeof(struct kernel_para)));
checkCudaErrors(cudaHostAlloc(&taskparaBuffer, BSize*SSize*sizeof(struct kernel_para), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&taskparaBufferDev, BSize*SSize*sizeof(struct kernel_para)));
// input data
int *aDev[tasks], *bDev[tasks], *cDev[tasks];
int *a[tasks], *b[tasks], *c[tasks];
for(int i=0; i<tasks; i++) {
checkCudaErrors(cudaMalloc(&aDev[i], N*sizeof(int)));
checkCudaErrors(cudaMalloc(&bDev[i], N*sizeof(int)));
checkCudaErrors(cudaMalloc(&cDev[i], N*sizeof(int)));
checkCudaErrors(cudaHostAlloc(&a[i], N*sizeof(int), NULL));
checkCudaErrors(cudaHostAlloc(&b[i], N*sizeof(int), NULL));
checkCudaErrors(cudaHostAlloc(&c[i], N*sizeof(int), NULL));
}
for(int i = 0; i < tasks; i++){
for(int j=0; j<N; j++) {
a[i][j]= (i%32)+1;
b[i][j]= (i%32)+1;
c[i][j] = 0;
}
}
for(int i = 0; i < tasks; i++){
checkCudaErrors(cudaMemcpyAsync(aDev[i], a[i] , N*sizeof(int),cudaMemcpyHostToDevice, s3));
checkCudaErrors(cudaMemcpyAsync(bDev[i], b[i] , N*sizeof(int),cudaMemcpyHostToDevice, s3));
checkCudaErrors(cudaMemcpyAsync(cDev[i], c[i] , N*sizeof(int),cudaMemcpyHostToDevice, s3));
}
for(int i = 0; i < tasks; i++){
// init. task para
taskArgs[i].A = aDev[i];
taskArgs[i].B = bDev[i];
taskArgs[i].C = cDev[i];
taskArgs[i].size = DATASIZE;
taskArgs[i].block = 1;
taskArgs[i].thread = THREADS;
taskArgs[i].warp = THREADS/32;
taskArgs[i].funcId = 1;
// taskArgs[i].taskId = i;
taskArgs[i].req = 1;
taskArgs[i].doneHost = 1;
taskArgs[i].doneGPU = THREADS/32;
// printf("Host:%p\n", taskArgs[i].A);
}
for(int i = 0; i < (BSize*SSize); i++){
taskparaBuffer[i].req = 0;
}
*done = 0;
*exec = 0;
//*totalScheTasks = 0;
*totalExecTasks = 0;
checkCudaErrors(cudaMemcpyAsync(execDev, exec, sizeof(int), cudaMemcpyHostToDevice, s3));
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, s3));
checkCudaErrors(cudaMemcpyAsync(totalExecTasksDev, totalExecTasks, sizeof(int), cudaMemcpyHostToDevice, s3));
// checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, s3));
checkCudaErrors(cudaMemcpyAsync(taskparaBufferDev, taskparaBuffer, BSize*SSize*sizeof(struct kernel_para), cudaMemcpyHostToDevice, s3));
checkCudaErrors(cudaMemcpyAsync(taskArgsDev, taskArgs, tasks*sizeof(struct kernel_para), cudaMemcpyHostToDevice, s3));
checkCudaErrors(cudaStreamSynchronize(s3));
deviceRT<<<BLOCKS,BLOCKSIZE,0, s2>>>(doneDev, totalExecTasksDev, warpPoolDev, taskparaBufferDev, taskArgsDev, execDev);
#if 1
// para delivery
int j = 0;
int c1 = 0;
startTime = my_timer();
while(j < tasks){
for(int i = 0; i < (SSize*BSize); i++){
if(taskparaBuffer[i].req == 0){
taskparaBuffer[i].warp = THREADS/32;
taskparaBuffer[i].req = 1;
taskparaBuffer[i].taskId = j;
// printf("Host:%d, %d\n", i*BSize+j, t);
checkCudaErrors(cudaMemcpyAsync(&taskparaBufferDev[i], &taskparaBuffer[i], sizeof(struct kernel_para), cudaMemcpyHostToDevice, s3));
j++;
if(j == tasks) break;
}
}
if(j == tasks) break;
checkCudaErrors(cudaMemcpyAsync(taskparaBuffer, taskparaBufferDev, BSize*SSize*sizeof(struct kernel_para), cudaMemcpyDeviceToHost, s3));
checkCudaErrors(cudaStreamSynchronize(s3));
c1++;
}
endTime = my_timer();
printf("Elapsed Time1:%lf sec.\n", (endTime-startTime));
printf("Iteration1:%d\n", c1);
#endif
#if 1
int all = 0;
startTime = my_timer();
while(*totalExecTasks < tasks){
checkCudaErrors(cudaMemcpyAsync(totalExecTasks, totalExecTasksDev, sizeof(int), cudaMemcpyDeviceToHost, s3));
checkCudaErrors(cudaStreamSynchronize(s3));
all++;
//if(all > 40000) break;
}
endTime = my_timer();
printf("Elapsed Time2:%lf sec.\n", (endTime-startTime));
printf("Iterations:%d, %d\n", all, *totalExecTasks);
#endif
#if 1
*exec = 1;
checkCudaErrors(cudaMemcpyAsync(execDev, exec, sizeof(int), cudaMemcpyHostToDevice, s3));
*done = 1;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, s3));
#endif
#if 1
// copy back results of tasks
for(int i=0; i<tasks; i++) {
checkCudaErrors(cudaMemcpyAsync (c[i], cDev[i] , N*sizeof(int),cudaMemcpyDeviceToHost, s3));
}
checkCudaErrors(cudaStreamSynchronize(s3));
#endif
#if 1
// verification
for (int i = 0; i < tasks; i++){
for(int j = 0; j < N; j++){
if(c[i][j] != DATASIZE*ipow((i%32)+1, 2)){
printf("Error:%d, %d\n", i, c[i][j]);
break;
}
}
}
#endif
for(int i = 0; i < tasks; i++){
checkCudaErrors(cudaFreeHost(a[i]));
checkCudaErrors(cudaFreeHost(b[i]));
checkCudaErrors(cudaFreeHost(c[i]));
checkCudaErrors(cudaFree(aDev[i]));
checkCudaErrors(cudaFree(bDev[i]));
checkCudaErrors(cudaFree(cDev[i]));
}
checkCudaErrors(cudaStreamDestroy(s2));
cudaStreamDestroy(s3);
cudaFreeHost(done);
cudaFreeHost(exec);
cudaFreeHost(totalExecTasks);
cudaFreeHost(taskArgs);
cudaFreeHost(taskparaBuffer);
// cudaFreeHost(totalScheTasks);
cudaFree(totalExecTasksDev);
// cudaFree(totalScheTasksDev);
cudaFree(warpPoolDev);
cudaFree(doneDev);
cudaFree(execDev);
cudaFree(taskArgsDev);
cudaFree(taskparaBufferDev);
return 0;
}
|
e837d603d9834fbd97778de508d143cd16bca3e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Bprop2(const float* out, const float* layer1, float* dsyn2, const int count, const float alpha)
{
int i = blockDim.y*blockIdx.y + threadIdx.y; //256
int j = blockDim.x*blockIdx.x + threadIdx.x; //4
//int k = blockIdx.x; //Data.count
atomicAdd(&dsyn2[i*4 + j], out[j] * layer1[256*(count) + i] * alpha);
} | e837d603d9834fbd97778de508d143cd16bca3e3.cu | #include "includes.h"
__global__ void Bprop2(const float* out, const float* layer1, float* dsyn2, const int count, const float alpha)
{
int i = blockDim.y*blockIdx.y + threadIdx.y; //256
int j = blockDim.x*blockIdx.x + threadIdx.x; //4
//int k = blockIdx.x; //Data.count
atomicAdd(&dsyn2[i*4 + j], out[j] * layer1[256*(count) + i] * alpha);
} |
725dc0991a0c2314417e1e3e21709b6a739bdbe7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void computeCoordsMandelbrot(double* coordX, double* coordY, int width, int height, double minX, double minY, double rangeX, double rangeY)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int y = i / width;
int x = i % width;
coordX[i] = minX + (rangeX / width) * x;
coordY[i] = minY + (rangeY / height) * y;
}
__global__ void findIterationsMandelbrot(int* iterArray, double* coordX, double* coordY)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
double realZ = coordX[i];
double imagZ = coordY[i];
double realZ2 = 0;
double imagZ2 = 0;
while (iterArray[i] < 255)
{
iterArray[i]++;
realZ2 = realZ * realZ;
imagZ2 = imagZ * imagZ;
if (realZ2 + imagZ2 > 4)
{
break;
}
imagZ = 2 * realZ * imagZ + coordY[i];
realZ = realZ2 - imagZ2 + coordX[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
extern "C" __declspec(dllexport) void computeMandelbrot(int* iterArray, int width, int height, double minX, double minY, double rangeX, double rangeY)
{
hipError_t cudaStatus;
double* coordX;
double* coordY;
int* iterArrayGpu;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
hipMalloc(&coordX, width * height * sizeof(double));
hipMalloc(&coordY, width * height * sizeof(double));
hipMalloc(&iterArrayGpu, width * height * sizeof(int));
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( computeCoordsMandelbrot) , dim3((width*height)/256), dim3(256), 0, 0, coordX, coordY, width, height, minX, minY, rangeX, rangeY);
hipLaunchKernelGGL(( findIterationsMandelbrot) , dim3((width * height) / 256), dim3(256) , 0, 0, iterArrayGpu, coordX, coordY);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(iterArray, iterArrayGpu, width * height * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(coordX);
hipFree(coordY);
hipFree(iterArrayGpu);
}
| 725dc0991a0c2314417e1e3e21709b6a739bdbe7.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void computeCoordsMandelbrot(double* coordX, double* coordY, int width, int height, double minX, double minY, double rangeX, double rangeY)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int y = i / width;
int x = i % width;
coordX[i] = minX + (rangeX / width) * x;
coordY[i] = minY + (rangeY / height) * y;
}
__global__ void findIterationsMandelbrot(int* iterArray, double* coordX, double* coordY)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
double realZ = coordX[i];
double imagZ = coordY[i];
double realZ2 = 0;
double imagZ2 = 0;
while (iterArray[i] < 255)
{
iterArray[i]++;
realZ2 = realZ * realZ;
imagZ2 = imagZ * imagZ;
if (realZ2 + imagZ2 > 4)
{
break;
}
imagZ = 2 * realZ * imagZ + coordY[i];
realZ = realZ2 - imagZ2 + coordX[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
extern "C" __declspec(dllexport) void computeMandelbrot(int* iterArray, int width, int height, double minX, double minY, double rangeX, double rangeY)
{
cudaError_t cudaStatus;
double* coordX;
double* coordY;
int* iterArrayGpu;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaMalloc(&coordX, width * height * sizeof(double));
cudaMalloc(&coordY, width * height * sizeof(double));
cudaMalloc(&iterArrayGpu, width * height * sizeof(int));
// Launch a kernel on the GPU with one thread for each element.
computeCoordsMandelbrot <<<(width*height)/256, 256>>>(coordX, coordY, width, height, minX, minY, rangeX, rangeY);
findIterationsMandelbrot <<<(width * height) / 256, 256 >>>(iterArrayGpu, coordX, coordY);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(iterArray, iterArrayGpu, width * height * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(coordX);
cudaFree(coordY);
cudaFree(iterArrayGpu);
}
|
f67f3606549372745203e18dffa3876529fc3a07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _SCAN_WORKEFFICIENT_KERNEL_8_H_
#define _SCAN_WORKEFFICIENT_KERNEL_8_H_
#
#include "local_macros.h"
#define TIDX (__mul24(blockIdx.x,blockDim.x) + threadIdx.x)
#define TIDY (__mul24(blockIdx.y,blockDim.y) + threadIdx.y)
#define TWIDTH (__mul24(gridDim.x,blockDim.x))
#define THEIGHT (__mul24(gridDim.y,blockDim.y))
#define ArrayID (TIDY*TWIDTH+TIDX)
#define MAKE_FLOAT4(arg) make_float4((arg), (arg), (arg), (arg))
#define MAKE_INT4(arg) make_int4((arg).x, (arg).y, (arg).z, (arg).w);
// Written by NVidia
// Modified by Gordon Erlebacher, Feb. 21, 2008
//----------------------------------------------------------------------
__global__ void scan_workefficient_8(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// Version working with float4's again. Using floats is rather difficult
// hardcoded for 16x16 tiles (minimum size), with 256 threads per block
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
int numThreads = blockDim.x * blockDim.y;
//if (blockIdx.x != 11) return; // block 13 has serial errors
#if 1
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
//TMP(last_share) = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
int flag1;
float widthi = 1./width;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int thread_id = threadIdx.x + blockDim.x * threadIdx.y;
//--------------------
// one iteration per row in the square tile
// process 4 rows at a time
__syncthreads();
flag1 = 1;
int xid = xorig + threadIdx.x; // xorig + 0...15
if (xid < 0 || xid >= WW) flag1 = 0;
int yid1 = yorig + threadIdx.y;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid = xid + yid1*WW;
//temp[thread_id].x = float(xid*widthi);
TMP(thread_id).x = float(xid*widthi);
//temp[thread_id].y = float(yid1*widthi);
TMP(thread_id).y = float(yid1*widthi);
#if 1
if (flag1 == 0) {
//temp[thread_id] = make_float4(0.,0.,0.,0.);
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
float f = g_idata[arrayid].w; // ERROR
if (int(f+.1) != seed.w) {
//temp[thread_id] = make_float4(0.,0.,0.,0.);
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
//temp[thread_id].w = 1.0;
TMP(thread_id).w = 1.0;
}
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// //--------------------
//
//// xorig - edge/2, xorig + edge/2 - 1
//
//// For the 16x16 case (hardcoded), the first pass with 64 threads can
//// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
//// follows had a thread handle two floats at a time, so can only handl
//// 1/2 the domain on each pass
//// manually treat each half of the domain
int offset = 1;
int ai, bi;
int sz;
#if 1
sz = 128; // hardcoded for 16x16=256 tile
// build the sum in place up the tree
for (int d = sz; d > 0; d >>= 1) {
__syncthreads();
if (thread_id < d) {
ai = offset*(2*thread_id+1)-1;
bi = offset*(2*thread_id+2)-1;
//temp[bi].x += temp[ai].x;
//temp[bi].y += temp[ai].y;
//temp[bi].w += temp[ai].w;
TMP(bi).x += TMP(ai).x;
TMP(bi).y += TMP(ai).y;
TMP(bi).w += TMP(ai).w;
}
offset <<= 1;
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// Something wrong with the results
// write results to global memory
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
int el = edge*edge-1;
//float nbs = temp[el].w;
float nbs = TMP(el).w;
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
sum[blockIdx.x] = make_float4(TMP(el).x*nbs1, TMP(el).y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_8_larger(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// Version working with float4's again. Using floats is rather difficult
// hardcoded for 16x16 tiles (minimum size), with 256 threads per block
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
int numThreads = blockDim.x * blockDim.y;
//if (blockIdx.x != 11) return; // block 13 has serial errors
#if 1
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
//TMP(last_share) = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge; // one thread per float (edge float4's)
int yorig = y - edge; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
int flag1;
float widthi = 1./width;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int thread_id = threadIdx.x + blockDim.x * threadIdx.y;
//--------------------
// one iteration per row in the square tile
// process 4 rows at a time
__syncthreads();
flag1 = 1;
int xid = xorig + (threadIdx.x << 2); // xorig + 0...15
if (xid < 0 || xid >= WW) flag1 = 0;
int yid1 = yorig + (threadIdx.y << 2);
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid = xid + yid1*WW;
TMP(thread_id).x = float(xid*widthi);
TMP(thread_id).y = float(yid1*widthi);
#if 1
if (flag1 == 0) {
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
float f = g_idata[arrayid].w; // ERROR
if (int(f+.1) != seed.w) {
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
TMP(thread_id).w = 1.0;
}
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// //--------------------
//
//// xorig - edge/2, xorig + edge/2 - 1
//
//// For the 16x16 case (hardcoded), the first pass with 64 threads can
//// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
//// follows had a thread handle two floats at a time, so can only handl
//// 1/2 the domain on each pass
//// manually treat each half of the domain
int offset = 1;
int ai, bi;
int sz;
#if 1
sz = 128; // hardcoded for 16x16=256 tile
// build the sum in place up the tree
for (int d = sz; d > 0; d >>= 1) {
__syncthreads();
if (thread_id < d) {
ai = offset*(2*thread_id+1)-1;
bi = offset*(2*thread_id+2)-1;
//temp[bi].x += temp[ai].x;
//temp[bi].y += temp[ai].y;
//temp[bi].w += temp[ai].w;
TMP(bi).x += TMP(ai).x;
TMP(bi).y += TMP(ai).y;
TMP(bi).w += TMP(ai).w;
}
offset <<= 1;
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// Something wrong with the results
// write results to global memory
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
int el = edge*edge-1;
//float nbs = temp[el].w;
float nbs = TMP(el).w;
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
sum[blockIdx.x] = make_float4(TMP(el).x*nbs1, TMP(el).y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
#endif
| f67f3606549372745203e18dffa3876529fc3a07.cu | #ifndef _SCAN_WORKEFFICIENT_KERNEL_8_H_
#define _SCAN_WORKEFFICIENT_KERNEL_8_H_
#
#include "local_macros.h"
#define TIDX (__mul24(blockIdx.x,blockDim.x) + threadIdx.x)
#define TIDY (__mul24(blockIdx.y,blockDim.y) + threadIdx.y)
#define TWIDTH (__mul24(gridDim.x,blockDim.x))
#define THEIGHT (__mul24(gridDim.y,blockDim.y))
#define ArrayID (TIDY*TWIDTH+TIDX)
#define MAKE_FLOAT4(arg) make_float4((arg), (arg), (arg), (arg))
#define MAKE_INT4(arg) make_int4((arg).x, (arg).y, (arg).z, (arg).w);
// Written by NVidia
// Modified by Gordon Erlebacher, Feb. 21, 2008
//----------------------------------------------------------------------
__global__ void scan_workefficient_8(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// Version working with float4's again. Using floats is rather difficult
// hardcoded for 16x16 tiles (minimum size), with 256 threads per block
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
int numThreads = blockDim.x * blockDim.y;
//if (blockIdx.x != 11) return; // block 13 has serial errors
#if 1
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
//TMP(last_share) = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
int flag1;
float widthi = 1./width;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int thread_id = threadIdx.x + blockDim.x * threadIdx.y;
//--------------------
// one iteration per row in the square tile
// process 4 rows at a time
__syncthreads();
flag1 = 1;
int xid = xorig + threadIdx.x; // xorig + 0...15
if (xid < 0 || xid >= WW) flag1 = 0;
int yid1 = yorig + threadIdx.y;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid = xid + yid1*WW;
//temp[thread_id].x = float(xid*widthi);
TMP(thread_id).x = float(xid*widthi);
//temp[thread_id].y = float(yid1*widthi);
TMP(thread_id).y = float(yid1*widthi);
#if 1
if (flag1 == 0) {
//temp[thread_id] = make_float4(0.,0.,0.,0.);
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
float f = g_idata[arrayid].w; // ERROR
if (int(f+.1) != seed.w) {
//temp[thread_id] = make_float4(0.,0.,0.,0.);
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
//temp[thread_id].w = 1.0;
TMP(thread_id).w = 1.0;
}
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// //--------------------
//
//// xorig - edge/2, xorig + edge/2 - 1
//
//// For the 16x16 case (hardcoded), the first pass with 64 threads can
//// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
//// follows had a thread handle two floats at a time, so can only handl
//// 1/2 the domain on each pass
//// manually treat each half of the domain
int offset = 1;
int ai, bi;
int sz;
#if 1
sz = 128; // hardcoded for 16x16=256 tile
// build the sum in place up the tree
for (int d = sz; d > 0; d >>= 1) {
__syncthreads();
if (thread_id < d) {
ai = offset*(2*thread_id+1)-1;
bi = offset*(2*thread_id+2)-1;
//temp[bi].x += temp[ai].x;
//temp[bi].y += temp[ai].y;
//temp[bi].w += temp[ai].w;
TMP(bi).x += TMP(ai).x;
TMP(bi).y += TMP(ai).y;
TMP(bi).w += TMP(ai).w;
}
offset <<= 1;
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// Something wrong with the results
// write results to global memory
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
int el = edge*edge-1;
//float nbs = temp[el].w;
float nbs = TMP(el).w;
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
sum[blockIdx.x] = make_float4(TMP(el).x*nbs1, TMP(el).y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_8_larger(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// Version working with float4's again. Using floats is rather difficult
// hardcoded for 16x16 tiles (minimum size), with 256 threads per block
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
int numThreads = blockDim.x * blockDim.y;
//if (blockIdx.x != 11) return; // block 13 has serial errors
#if 1
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
//TMP(last_share) = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge; // one thread per float (edge float4's)
int yorig = y - edge; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
int flag1;
float widthi = 1./width;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int thread_id = threadIdx.x + blockDim.x * threadIdx.y;
//--------------------
// one iteration per row in the square tile
// process 4 rows at a time
__syncthreads();
flag1 = 1;
int xid = xorig + (threadIdx.x << 2); // xorig + 0...15
if (xid < 0 || xid >= WW) flag1 = 0;
int yid1 = yorig + (threadIdx.y << 2);
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid = xid + yid1*WW;
TMP(thread_id).x = float(xid*widthi);
TMP(thread_id).y = float(yid1*widthi);
#if 1
if (flag1 == 0) {
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
float f = g_idata[arrayid].w; // ERROR
if (int(f+.1) != seed.w) {
TMP(thread_id) = make_float4(0.,0.,0.,0.);
} else {
TMP(thread_id).w = 1.0;
}
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// //--------------------
//
//// xorig - edge/2, xorig + edge/2 - 1
//
//// For the 16x16 case (hardcoded), the first pass with 64 threads can
//// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
//// follows had a thread handle two floats at a time, so can only handl
//// 1/2 the domain on each pass
//// manually treat each half of the domain
int offset = 1;
int ai, bi;
int sz;
#if 1
sz = 128; // hardcoded for 16x16=256 tile
// build the sum in place up the tree
for (int d = sz; d > 0; d >>= 1) {
__syncthreads();
if (thread_id < d) {
ai = offset*(2*thread_id+1)-1;
bi = offset*(2*thread_id+2)-1;
//temp[bi].x += temp[ai].x;
//temp[bi].y += temp[ai].y;
//temp[bi].w += temp[ai].w;
TMP(bi).x += TMP(ai).x;
TMP(bi).y += TMP(ai).y;
TMP(bi).w += TMP(ai).w;
}
offset <<= 1;
}
#endif
__syncthreads();
#if 0
sum[thread_id] = temp[thread_id];
return;
#endif
// Something wrong with the results
// write results to global memory
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
int el = edge*edge-1;
//float nbs = temp[el].w;
float nbs = TMP(el).w;
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
sum[blockIdx.x] = make_float4(TMP(el).x*nbs1, TMP(el).y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
#endif
|
718676f29bb20b66429708a9b21fabaeb41b4677.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define N 300000
#define NSTREAM 4
__global__ void kernel_1()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int main(int argc, char **argv)
{
int n_streams = NSTREAM;
int isize = 1;
int iblock = 1;
int bigcase = 0;
if (argc > 1) n_streams = atoi(argv[1]);
if (argc > 2) bigcase = atoi(argv[2]);
float elapsed_time;
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
CHECK(hipSetDevice(dev));
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
printf("> CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf("> CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof(hipStream_t));
for (int i = 0; i < n_streams; i++)
{
CHECK(hipStreamCreate(&(streams[i])));
}
if (bigcase == 1)
{
iblock = 512;
isize = 1 << 12;
}
dim3 block(iblock);
dim3 grid(isize / iblock);
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
CHECK(hipEventRecord(start, 0));
// breadth first
for (int i = 0; i < n_streams; i++)
hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
for (int i = 0; i < n_streams; i++)
hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
for (int i = 0; i < n_streams; i++)
hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], );
for (int i = 0; i < n_streams; i++)
hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], );
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
CHECK(hipEventElapsedTime(&elapsed_time, start, stop));
printf("Measured time for parallel execution = %f \n", elapsed_time);
for (int i = 0; i < n_streams; i++)
{
CHECK(hipStreamDestroy(streams[i]));
}
free(streams);
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
CHECK(hipDeviceReset());
return 0;
}
| 718676f29bb20b66429708a9b21fabaeb41b4677.cu | #include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#define N 300000
#define NSTREAM 4
__global__ void kernel_1()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int main(int argc, char **argv)
{
int n_streams = NSTREAM;
int isize = 1;
int iblock = 1;
int bigcase = 0;
if (argc > 1) n_streams = atoi(argv[1]);
if (argc > 2) bigcase = atoi(argv[2]);
float elapsed_time;
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
CHECK(cudaSetDevice(dev));
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
printf("> CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf("> CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof(cudaStream_t));
for (int i = 0; i < n_streams; i++)
{
CHECK(cudaStreamCreate(&(streams[i])));
}
if (bigcase == 1)
{
iblock = 512;
isize = 1 << 12;
}
dim3 block(iblock);
dim3 grid(isize / iblock);
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
CHECK(cudaEventRecord(start, 0));
// breadth first
for (int i = 0; i < n_streams; i++)
kernel_1<<<grid, block, 0, streams[i]>>>();
for (int i = 0; i < n_streams; i++)
kernel_2<<<grid, block, 0, streams[i]>>>();
for (int i = 0; i < n_streams; i++)
kernel_3<<<grid, block, 0, streams[i]>>>();
for (int i = 0; i < n_streams; i++)
kernel_4<<<grid, block, 0, streams[i]>>>();
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
CHECK(cudaEventElapsedTime(&elapsed_time, start, stop));
printf("Measured time for parallel execution = %f \n", elapsed_time);
for (int i = 0; i < n_streams; i++)
{
CHECK(cudaStreamDestroy(streams[i]));
}
free(streams);
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
CHECK(cudaDeviceReset());
return 0;
}
|
8a9ceb80a2cfe9b91e6da549846372e1488e8cc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "baseLatticeForce.cuh"
/*! \file baseLatticeForce.cu */
/** @addtogroup forceKernels force Kernels
* @{
*/
__global__ void gpu_lattice_spin_force_nn_kernel(dVec *d_force,
dVec *d_spins,
Index3D latticeIndex,
scalar J,
int N,
int L,
bool zeroForce)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
if(zeroForce)
d_force[idx] = make_dVec(0.0);
int3 target = latticeIndex.inverseIndex(idx);
/*
int smx,spx,smy,spy,smz,spz;
smx = latticeIndex(wrap(target.x-1,L),target.y,target.z);
spx = latticeIndex(wrap(target.x+1,L),target.y,target.z);
smy = latticeIndex(target.x,wrap(target.y-1,L),target.z);
spy = latticeIndex(target.x,wrap(target.y+1,L),target.z);
smz = latticeIndex(target.x,target.y,wrap(target.z-1,L));
spz = latticeIndex(target.x,target.y,wrap(target.z+1,L));
if(zeroForce)
d_force[idx] = J*(d_spins[smx]+d_spins[spx] + d_spins[smy]+d_spins[spy] + d_spins[smz]+d_spins[spz]) ;
else
d_force[idx] += J*(d_spins[smx]+d_spins[spx] + d_spins[smy]+d_spins[spy] + d_spins[smz]+d_spins[spz]) ;
*/
dVec smx,spx,smy,spy,smz,spz,ans;
smx = d_spins[latticeIndex(wrap(target.x-1,L),target.y,target.z)];
spx = d_spins[latticeIndex(wrap(target.x+1,L),target.y,target.z)];
smy = d_spins[latticeIndex(target.x,wrap(target.y-1,L),target.z)];
spy = d_spins[latticeIndex(target.x,wrap(target.y+1,L),target.z)];
smz = d_spins[latticeIndex(target.x,target.y,wrap(target.z-1,L))];
spz = d_spins[latticeIndex(target.x,target.y,wrap(target.z+1,L))];
ans = J*(smx+smy+smz+spx+spy+spz);
if(zeroForce)
d_force[idx] = ans ;
else
d_force[idx] += ans ;
}
bool gpu_lattice_spin_force_nn(dVec *d_force,
dVec *d_spins,
Index3D latticeIndex,
scalar J,
int N,
bool zeroForce,
int maxBlockSize)
{
unsigned int block_size = maxBlockSize;
unsigned int nblocks = N/block_size+1;
hipLaunchKernelGGL(( gpu_lattice_spin_force_nn_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_force,d_spins,latticeIndex,J,N,
latticeIndex.getSizes().x,zeroForce);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
/** @} */ //end of group declaration
| 8a9ceb80a2cfe9b91e6da549846372e1488e8cc6.cu | #include "baseLatticeForce.cuh"
/*! \file baseLatticeForce.cu */
/** @addtogroup forceKernels force Kernels
* @{
*/
__global__ void gpu_lattice_spin_force_nn_kernel(dVec *d_force,
dVec *d_spins,
Index3D latticeIndex,
scalar J,
int N,
int L,
bool zeroForce)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
if(zeroForce)
d_force[idx] = make_dVec(0.0);
int3 target = latticeIndex.inverseIndex(idx);
/*
int smx,spx,smy,spy,smz,spz;
smx = latticeIndex(wrap(target.x-1,L),target.y,target.z);
spx = latticeIndex(wrap(target.x+1,L),target.y,target.z);
smy = latticeIndex(target.x,wrap(target.y-1,L),target.z);
spy = latticeIndex(target.x,wrap(target.y+1,L),target.z);
smz = latticeIndex(target.x,target.y,wrap(target.z-1,L));
spz = latticeIndex(target.x,target.y,wrap(target.z+1,L));
if(zeroForce)
d_force[idx] = J*(d_spins[smx]+d_spins[spx] + d_spins[smy]+d_spins[spy] + d_spins[smz]+d_spins[spz]) ;
else
d_force[idx] += J*(d_spins[smx]+d_spins[spx] + d_spins[smy]+d_spins[spy] + d_spins[smz]+d_spins[spz]) ;
*/
dVec smx,spx,smy,spy,smz,spz,ans;
smx = d_spins[latticeIndex(wrap(target.x-1,L),target.y,target.z)];
spx = d_spins[latticeIndex(wrap(target.x+1,L),target.y,target.z)];
smy = d_spins[latticeIndex(target.x,wrap(target.y-1,L),target.z)];
spy = d_spins[latticeIndex(target.x,wrap(target.y+1,L),target.z)];
smz = d_spins[latticeIndex(target.x,target.y,wrap(target.z-1,L))];
spz = d_spins[latticeIndex(target.x,target.y,wrap(target.z+1,L))];
ans = J*(smx+smy+smz+spx+spy+spz);
if(zeroForce)
d_force[idx] = ans ;
else
d_force[idx] += ans ;
}
bool gpu_lattice_spin_force_nn(dVec *d_force,
dVec *d_spins,
Index3D latticeIndex,
scalar J,
int N,
bool zeroForce,
int maxBlockSize)
{
unsigned int block_size = maxBlockSize;
unsigned int nblocks = N/block_size+1;
gpu_lattice_spin_force_nn_kernel<<<nblocks,block_size>>>(d_force,d_spins,latticeIndex,J,N,
latticeIndex.getSizes().x,zeroForce);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
/** @} */ //end of group declaration
|
b15d44dae1bce821dee5bddd5688a577c891b2ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Simple code to check whether there a working CUDA runtime + driver + GPU device
* combination present in the system.
*
* The expected result of this program is the CUDA runtime and driver API version
* printed on the command line and a confirmation that a test kernel has been
* successfully executed on the CUDA GPU.
*
* Compile with: nvcc check-cuda-drv-runtime.cu -o chk
* Then run: ./chk
* Expected outputs:
* - everything working fine (CUDA 7.5 driver + runtime):
* CUDA driver version: 7050
* CUDA runtime version: 7050
* Test kernel executed successfully!
*
* - no device detected:
* CUDA driver version: 7050
* hipRuntimeGetVersion failed: no CUDA-capable device is detected
*
* - runtime / driver mismatch (driver ver < runtime ver):
* CUDA driver version: 7050
* hipRuntimeGetVersion failed: CUDA driver version is insufficient for CUDA runtime version
*
* Author: Szilrd Pll (sin.pecado@gmail.com)
*
*/
#include <cstdio>
__global__ void test_kernel() {}
static void check_cuda_retval(hipError_t status, const char* msg)
{
if (status != hipSuccess)
{
fprintf(stderr, "%s: %s\n", msg, hipGetErrorString(status));
exit(1);
}
}
int main()
{
hipError_t stat;
int rt_ver = 0, drv_ver = 0;
stat = hipDriverGetVersion(&drv_ver);
check_cuda_retval(stat, "hipDriverGetVersion failed");
printf("CUDA driver version: %d\n", drv_ver);
stat = hipRuntimeGetVersion(&rt_ver);
check_cuda_retval(stat, "hipRuntimeGetVersion failed");
printf("CUDA runtime version: %d\n", rt_ver);
hipLaunchKernelGGL(( test_kernel), dim3(1), dim3(512), 0, 0, );
stat = hipDeviceSynchronize();
check_cuda_retval(stat, "test kernel launch failed");
printf("Test kernel executed successfully!\n");
return 0;
}
| b15d44dae1bce821dee5bddd5688a577c891b2ec.cu | /* Simple code to check whether there a working CUDA runtime + driver + GPU device
* combination present in the system.
*
* The expected result of this program is the CUDA runtime and driver API version
* printed on the command line and a confirmation that a test kernel has been
* successfully executed on the CUDA GPU.
*
* Compile with: nvcc check-cuda-drv-runtime.cu -o chk
* Then run: ./chk
* Expected outputs:
* - everything working fine (CUDA 7.5 driver + runtime):
* CUDA driver version: 7050
* CUDA runtime version: 7050
* Test kernel executed successfully!
*
* - no device detected:
* CUDA driver version: 7050
* cudaRuntimeGetVersion failed: no CUDA-capable device is detected
*
* - runtime / driver mismatch (driver ver < runtime ver):
* CUDA driver version: 7050
* cudaRuntimeGetVersion failed: CUDA driver version is insufficient for CUDA runtime version
*
* Author: Szilárd Páll (sin.pecado@gmail.com)
*
*/
#include <cstdio>
__global__ void test_kernel() {}
static void check_cuda_retval(cudaError_t status, const char* msg)
{
if (status != cudaSuccess)
{
fprintf(stderr, "%s: %s\n", msg, cudaGetErrorString(status));
exit(1);
}
}
int main()
{
cudaError_t stat;
int rt_ver = 0, drv_ver = 0;
stat = cudaDriverGetVersion(&drv_ver);
check_cuda_retval(stat, "cudaDriverGetVersion failed");
printf("CUDA driver version: %d\n", drv_ver);
stat = cudaRuntimeGetVersion(&rt_ver);
check_cuda_retval(stat, "cudaRuntimeGetVersion failed");
printf("CUDA runtime version: %d\n", rt_ver);
test_kernel<<<1, 512, 0>>>();
stat = cudaThreadSynchronize();
check_cuda_retval(stat, "test kernel launch failed");
printf("Test kernel executed successfully!\n");
return 0;
}
|
7c5991122117d19f24eec12f4a088323d6691844.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
/*
* This file includes simple demonstrations of a variety of shuffle
* instructions.
*/
#define BDIMX 16
#define SEGM 4
void printData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%2d ", in[i]);
}
printf("\n");
}
__global__ void test_shfl_broadcast(int *d_out, int *d_in, int const srcLane)
{
int value = d_in[threadIdx.x];
value = __shfl(value, srcLane, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_wrap (int *d_out, int *d_in, int const offset)
{
int value = d_in[threadIdx.x];
value = __shfl(value, threadIdx.x + offset, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_wrap_plus (int *d_out, int *d_in, int const offset)
{
int value = d_in[threadIdx.x];
value += __shfl(value, threadIdx.x + offset, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_up(int *d_out, int *d_in, unsigned int const delta)
{
int value = d_in[threadIdx.x];
value = __shfl_up (value, delta, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_down(int *d_out, int *d_in, unsigned int const delta)
{
int value = d_in[threadIdx.x];
value = __shfl_down (value, delta, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_xor(int *d_out, int *d_in, int const mask)
{
int value = d_in[threadIdx.x];
value = __shfl_xor (value, mask, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_xor_array(int *d_out, int *d_in, int const mask)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[0] = __shfl_xor (value[0], mask, BDIMX);
value[1] = __shfl_xor (value[1], mask, BDIMX);
value[2] = __shfl_xor (value[2], mask, BDIMX);
value[3] = __shfl_xor (value[3], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_int4(int *d_out, int *d_in, int const mask)
{
int idx = threadIdx.x * SEGM;
int4 value;
value.x = d_in[idx];
value.y = d_in[idx + 1];
value.z = d_in[idx + 2];
value.w = d_in[idx + 3];
value.x = __shfl_xor (value.x, mask, BDIMX);
value.y = __shfl_xor (value.y, mask, BDIMX);
value.z = __shfl_xor (value.z, mask, BDIMX);
value.w = __shfl_xor (value.w, mask, BDIMX);
d_out[idx] = value.x;
d_out[idx + 1] = value.y;
d_out[idx + 2] = value.z;
d_out[idx + 3] = value.w;
}
__global__ void test_shfl_xor_element(int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[srcIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_array_swap (int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
bool pred = ((threadIdx.x & 1) != mask);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__inline__ __device__
void swap_old(int *value, int tid, int mask, int srcIdx, int dstIdx)
{
bool pred = ((tid / mask + 1) == 1);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
}
__inline__ __device__
void swap(int *value, int laneIdx, int mask, int firstIdx, int secondIdx)
{
bool pred = ((laneIdx / mask + 1) == 1);
if (pred)
{
int tmp = value[firstIdx];
value[firstIdx] = value[secondIdx];
value[secondIdx] = tmp;
}
value[secondIdx] = __shfl_xor (value[secondIdx], mask, BDIMX);
if (pred)
{
int tmp = value[firstIdx];
value[firstIdx] = value[secondIdx];
value[secondIdx] = tmp;
}
}
__global__ void test_shfl_swap_old (int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
swap(value, threadIdx.x, mask, srcIdx, dstIdx);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__
void test_shfl_swap (int *d_out, int *d_in, int const mask, int firstIdx,
int secondIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
swap(value, threadIdx.x, mask, firstIdx, secondIdx);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_array_swap_base (int *d_out, int *d_in,
int const mask, int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_array(int *d_out, int *d_in, int const offset)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
int lane = (offset + threadIdx.x) % SEGM;
value[0] = __shfl (value[3], lane, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_plus(int *d_out, int *d_in, int const mask)
{
int value = d_in[threadIdx.x];
value += __shfl_xor (value, mask, BDIMX);
d_out[threadIdx.x] = value;
}
int main(int argc, char **argv)
{
int dev = 0;
bool iPrintout = 1;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("> %s Starting.", argv[0]);
printf("at Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int nElem = BDIMX;
int h_inData[BDIMX], h_outData[BDIMX];
for (int i = 0; i < nElem; i++) h_inData[i] = i;
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
size_t nBytes = nElem * sizeof(int);
int *d_inData, *d_outData;
CHECK(hipMalloc((int**)&d_inData, nBytes));
CHECK(hipMalloc((int**)&d_outData, nBytes));
CHECK(hipMemcpy(d_inData, h_inData, nBytes, hipMemcpyHostToDevice));
int block = BDIMX;
// shfl bcast
hipLaunchKernelGGL(( test_shfl_broadcast), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl bcast\t\t: ");
printData(h_outData, nElem);
}
// shfl offset
hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, -2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap right\t\t: ");
printData(h_outData, nElem);
}
// shfl up
hipLaunchKernelGGL(( test_shfl_up), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl up \t\t: ");
printData(h_outData, nElem);
}
// shfl offset
hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap left\t\t: ");
printData(h_outData, nElem);
}
// shfl offset
hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap 2\t\t: ");
printData(h_outData, nElem);
}
// shfl down
hipLaunchKernelGGL(( test_shfl_down), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl down \t\t: ");
printData(h_outData, nElem);
}
// shfl xor
hipLaunchKernelGGL(( test_shfl_xor), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl xor 1\t\t: ");
printData(h_outData, nElem);
}
hipLaunchKernelGGL(( test_shfl_xor), dim3(1), dim3(block), 0, 0, d_outData, d_inData, -8);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl xor -1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - int4
hipLaunchKernelGGL(( test_shfl_xor_int4), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl int4 1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - register array
hipLaunchKernelGGL(( test_shfl_xor_array), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl array 1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - test_shfl_xor_element
hipLaunchKernelGGL(( test_shfl_xor_element), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl idx \t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
hipLaunchKernelGGL(( test_shfl_xor_array_swap_base), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1,
0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap base\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
hipLaunchKernelGGL(( test_shfl_xor_array_swap), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap 0 3\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
hipLaunchKernelGGL(( test_shfl_swap), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap inline\t: ");
printData(h_outData, nElem);
}
// shfl xor - register array
hipLaunchKernelGGL(( test_shfl_array), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl array \t\t: ");
printData(h_outData, nElem);
}
// finishing
CHECK(hipFree(d_inData));
CHECK(hipFree(d_outData));
CHECK(hipDeviceReset(); );
return EXIT_SUCCESS;
}
| 7c5991122117d19f24eec12f4a088323d6691844.cu | #include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
/*
* This file includes simple demonstrations of a variety of shuffle
* instructions.
*/
#define BDIMX 16
#define SEGM 4
// Print the first `size` entries of `in` on one line, each right-aligned
// to a minimum width of two characters, followed by a newline.
void printData(int *in, const int size)
{
    int idx = 0;
    while (idx < size)
    {
        printf("%2d ", in[idx]);
        ++idx;
    }
    printf("\n");
}
// Broadcast: every thread loads its own element and replaces it with the
// value held by lane `srcLane`, so all BDIMX outputs equal d_in[srcLane].
// NOTE(review): uses the legacy mask-less __shfl, removed on Volta+;
// newer architectures require __shfl_sync with an explicit lane mask.
__global__ void test_shfl_broadcast(int *d_out, int *d_in, int const srcLane)
{
    int value = d_in[threadIdx.x];
    value = __shfl(value, srcLane, BDIMX);
    d_out[threadIdx.x] = value;
}
// Shift: each thread fetches the value of lane (threadIdx.x + offset);
// the source lane wraps around within the BDIMX-wide shuffle segment.
__global__ void test_shfl_wrap (int *d_out, int *d_in, int const offset)
{
    int value = d_in[threadIdx.x];
    value = __shfl(value, threadIdx.x + offset, BDIMX);
    d_out[threadIdx.x] = value;
}
// Like test_shfl_wrap, but accumulates the shuffled value onto the local one
// (value[i] += value[(i + offset) mod BDIMX]).
// NOTE(review): not launched by the main() in this file.
__global__ void test_shfl_wrap_plus (int *d_out, int *d_in, int const offset)
{
    int value = d_in[threadIdx.x];
    value += __shfl(value, threadIdx.x + offset, BDIMX);
    d_out[threadIdx.x] = value;
}
// Shift values up by `delta` lanes within the segment; per __shfl_up
// semantics, the lowest `delta` lanes keep their own value unchanged.
__global__ void test_shfl_up(int *d_out, int *d_in, unsigned int const delta)
{
    int value = d_in[threadIdx.x];
    value = __shfl_up (value, delta, BDIMX);
    d_out[threadIdx.x] = value;
}
// Shift values down by `delta` lanes within the segment; per __shfl_down
// semantics, the highest `delta` lanes keep their own value unchanged.
__global__ void test_shfl_down(int *d_out, int *d_in, unsigned int const delta)
{
    int value = d_in[threadIdx.x];
    value = __shfl_down (value, delta, BDIMX);
    d_out[threadIdx.x] = value;
}
// Butterfly exchange: each lane trades its value with lane
// (threadIdx.x XOR mask) inside the BDIMX-wide segment.
__global__ void test_shfl_xor(int *d_out, int *d_in, int const mask)
{
    int value = d_in[threadIdx.x];
    value = __shfl_xor (value, mask, BDIMX);
    d_out[threadIdx.x] = value;
}
// Butterfly exchange on a per-thread register array: each thread owns SEGM
// consecutive elements and exchanges all of them with its XOR partner lane.
// main() launches this with block / SEGM threads so the full array is covered.
__global__ void test_shfl_xor_array(int *d_out, int *d_in, int const mask)
{
    int idx = threadIdx.x * SEGM;  // first element owned by this thread
    int value[SEGM];
    for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
    value[0] = __shfl_xor (value[0], mask, BDIMX);
    value[1] = __shfl_xor (value[1], mask, BDIMX);
    value[2] = __shfl_xor (value[2], mask, BDIMX);
    value[3] = __shfl_xor (value[3], mask, BDIMX);
    for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
// Same butterfly exchange as test_shfl_xor_array, but the four per-thread
// elements are carried in an int4 vector instead of a register array.
__global__ void test_shfl_xor_int4(int *d_out, int *d_in, int const mask)
{
    int idx = threadIdx.x * SEGM;
    int4 value;
    value.x = d_in[idx];
    value.y = d_in[idx + 1];
    value.z = d_in[idx + 2];
    value.w = d_in[idx + 3];
    value.x = __shfl_xor (value.x, mask, BDIMX);
    value.y = __shfl_xor (value.y, mask, BDIMX);
    value.z = __shfl_xor (value.z, mask, BDIMX);
    value.w = __shfl_xor (value.w, mask, BDIMX);
    d_out[idx] = value.x;
    d_out[idx + 1] = value.y;
    d_out[idx + 2] = value.z;
    d_out[idx + 3] = value.w;
}
// Exchange a single element: slot `srcIdx` of this thread's array receives
// slot `dstIdx` from the XOR partner lane; the remaining slots are copied
// through unchanged.
__global__ void test_shfl_xor_element(int *d_out, int *d_in, int const mask,
    int srcIdx, int dstIdx)
{
    int idx = threadIdx.x * SEGM;
    int value[SEGM];
    for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
    value[srcIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
    for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
// Cross-slot swap via XOR shuffle: lanes selected by the predicate
// ((threadIdx.x & 1) != mask) first swap their two local slots, every lane
// then exchanges slot `dstIdx` with its XOR partner, and the selected lanes
// swap back afterwards.
__global__ void test_shfl_xor_array_swap (int *d_out, int *d_in, int const mask,
    int srcIdx, int dstIdx)
{
    int idx = threadIdx.x * SEGM;
    int value[SEGM];
    for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
    bool pred = ((threadIdx.x & 1) != mask);
    if (pred)
    {
        int tmp = value[srcIdx];
        value[srcIdx] = value[dstIdx];
        value[dstIdx] = tmp;
    }
    value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
    if (pred)
    {
        int tmp = value[srcIdx];
        value[srcIdx] = value[dstIdx];
        value[dstIdx] = tmp;
    }
    for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
// Device helper: same pre-swap / exchange / post-swap pattern as swap(),
// with lanes selected by (tid / mask + 1) == 1.
// NOTE(review): dead code in this file — test_shfl_swap_old calls swap(),
// not swap_old().
__inline__ __device__
void swap_old(int *value, int tid, int mask, int srcIdx, int dstIdx)
{
    bool pred = ((tid / mask + 1) == 1);
    if (pred)
    {
        int tmp = value[srcIdx];
        value[srcIdx] = value[dstIdx];
        value[dstIdx] = tmp;
    }
    value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
    if (pred)
    {
        int tmp = value[srcIdx];
        value[srcIdx] = value[dstIdx];
        value[dstIdx] = tmp;
    }
}
// Device helper used by the swap kernels: lanes selected by
// (laneIdx / mask + 1) == 1 swap their two local slots, every lane then
// exchanges slot `secondIdx` with its XOR partner, and the selected lanes
// swap back — the combined effect moves data between different slots of
// partner lanes.
__inline__ __device__
void swap(int *value, int laneIdx, int mask, int firstIdx, int secondIdx)
{
    bool pred = ((laneIdx / mask + 1) == 1);
    if (pred)
    {
        int tmp = value[firstIdx];
        value[firstIdx] = value[secondIdx];
        value[secondIdx] = tmp;
    }
    value[secondIdx] = __shfl_xor (value[secondIdx], mask, BDIMX);
    if (pred)
    {
        int tmp = value[firstIdx];
        value[firstIdx] = value[secondIdx];
        value[secondIdx] = tmp;
    }
}
// NOTE(review): despite the name this kernel calls swap(), not swap_old(),
// making it behaviorally identical to test_shfl_swap. It is not launched
// by the main() in this file.
__global__ void test_shfl_swap_old (int *d_out, int *d_in, int const mask,
    int srcIdx, int dstIdx)
{
    int idx = threadIdx.x * SEGM;
    int value[SEGM];
    for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
    swap(value, threadIdx.x, mask, srcIdx, dstIdx);
    for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
// Load SEGM elements per thread, perform the inlined swap() exchange between
// slots firstIdx/secondIdx of partner lanes, and store the result.
__global__
void test_shfl_swap (int *d_out, int *d_in, int const mask, int firstIdx,
    int secondIdx)
{
    int idx = threadIdx.x * SEGM;
    int value[SEGM];
    for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
    swap(value, threadIdx.x, mask, firstIdx, secondIdx);
    for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
// Baseline for the swap kernels: exchanges only slot `dstIdx` with the XOR
// partner lane, with no pre/post local swap (srcIdx is unused here).
__global__ void test_shfl_xor_array_swap_base (int *d_out, int *d_in,
    int const mask, int srcIdx, int dstIdx)
{
    int idx = threadIdx.x * SEGM;
    int value[SEGM];
    for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
    value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
    for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
// Pull one element from another lane: slot 0 receives slot 3 of lane
// (offset + threadIdx.x) % SEGM; slots 1-3 pass through unchanged.
__global__ void test_shfl_array(int *d_out, int *d_in, int const offset)
{
    int idx = threadIdx.x * SEGM;
    int value[SEGM];
    for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
    int lane = (offset + threadIdx.x) % SEGM;
    value[0] = __shfl (value[3], lane, BDIMX);
    for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
// Like test_shfl_xor, but adds the partner's value to the local one — the
// building block of a butterfly reduction.
// NOTE(review): not launched by the main() in this file.
__global__ void test_shfl_xor_plus(int *d_out, int *d_in, int const mask)
{
    int value = d_in[threadIdx.x];
    value += __shfl_xor (value, mask, BDIMX);
    d_out[threadIdx.x] = value;
}
// Driver: initializes a BDIMX-element array on device 0 and runs each of the
// shuffle demonstration kernels in turn, printing input and result after every
// launch. Fix: the final reset was written `CHECK(cudaDeviceReset(); );` — a
// stray semicolon inside the macro argument that only compiles by accident
// with the common.h CHECK macro; corrected to `CHECK(cudaDeviceReset());`.
int main(int argc, char **argv)
{
    int dev = 0;
    bool iPrintout = 1;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("> %s Starting.", argv[0]);
    printf("at Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    int nElem = BDIMX;
    int h_inData[BDIMX], h_outData[BDIMX];
    // Input is simply 0..BDIMX-1 so shuffled lane indices are easy to read.
    for (int i = 0; i < nElem; i++) h_inData[i] = i;
    if(iPrintout)
    {
        printf("initialData\t\t: ");
        printData(h_inData, nElem);
    }
    size_t nBytes = nElem * sizeof(int);
    int *d_inData, *d_outData;
    CHECK(cudaMalloc((int**)&d_inData, nBytes));
    CHECK(cudaMalloc((int**)&d_outData, nBytes));
    CHECK(cudaMemcpy(d_inData, h_inData, nBytes, cudaMemcpyHostToDevice));
    int block = BDIMX;
    // shfl bcast
    test_shfl_broadcast<<<1, block>>>(d_outData, d_inData, 2);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl bcast\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl offset (negative offset wraps to the right)
    test_shfl_wrap<<<1, block>>>(d_outData, d_inData, -2);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl wrap right\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl up
    test_shfl_up<<<1, block>>>(d_outData, d_inData, 2);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl up \t\t: ");
        printData(h_outData, nElem);
    }
    // shfl offset (positive offset wraps to the left)
    test_shfl_wrap<<<1, block>>>(d_outData, d_inData, 2);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl wrap left\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl offset (same launch as above, printed under a second label)
    test_shfl_wrap<<<1, block>>>(d_outData, d_inData, 2);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl wrap 2\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl down
    test_shfl_down<<<1, block>>>(d_outData, d_inData, 2);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl down \t\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor
    test_shfl_xor<<<1, block>>>(d_outData, d_inData, 1);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("initialData\t\t: ");
        printData(h_inData, nElem);
    }
    if(iPrintout)
    {
        printf("shfl xor 1\t\t: ");
        printData(h_outData, nElem);
    }
    // NOTE(review): mask is -8 but the label below says "-1" — one of the
    // two is stale relative to the other.
    test_shfl_xor<<<1, block>>>(d_outData, d_inData, -8);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl xor -1\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor - int4
    test_shfl_xor_int4<<<1, block / SEGM>>>(d_outData, d_inData, 1);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("initialData\t\t: ");
        printData(h_inData, nElem);
    }
    if(iPrintout)
    {
        printf("shfl int4 1\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor - register array
    test_shfl_xor_array<<<1, block / SEGM>>>(d_outData, d_inData, 1);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("initialData\t\t: ");
        printData(h_inData, nElem);
    }
    if(iPrintout)
    {
        printf("shfl array 1\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor - test_shfl_xor_element
    test_shfl_xor_element<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("initialData\t\t: ");
        printData(h_inData, nElem);
    }
    if(iPrintout)
    {
        printf("shfl idx \t\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor - swap (baseline, no local pre/post swap)
    test_shfl_xor_array_swap_base<<<1, block / SEGM>>>(d_outData, d_inData, 1,
        0, 3);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl swap base\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor - swap
    test_shfl_xor_array_swap<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl swap 0 3\t\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor - swap (inlined helper)
    test_shfl_swap<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("shfl swap inline\t: ");
        printData(h_outData, nElem);
    }
    // shfl xor - register array
    test_shfl_array<<<1, block / SEGM>>>(d_outData, d_inData, 1);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
    if(iPrintout)
    {
        printf("initialData\t\t: ");
        printData(h_inData, nElem);
    }
    if(iPrintout)
    {
        printf("shfl array \t\t: ");
        printData(h_outData, nElem);
    }
    // finishing
    CHECK(cudaFree(d_inData));
    CHECK(cudaFree(d_outData));
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
|
93aba9d2e3af00885cb1f85d771488f367cfb591.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#ifndef NDEBUG
#define CHECK_STATUS(status) \
if (status != hipSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
hipGetErrorString(status))
#else
#define CHECK_STATUS(status) status
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////
// Empty placeholder kernel; exists only to demonstrate launching work on
// each device in this multi-GPU example.
__global__ void MyKernel(float* data){
}
// Multi-GPU demo: allocate a buffer on device 0 and device 1, launch a kernel
// on each, and copy the device-0 buffer to device 1 with hipMemcpyPeer.
int main(int argc, char **argv) {
    int can;
    // Query whether device 0 can directly access device 1's memory.
    // NOTE(review): the result is only printed; the code proceeds either way.
    CHECK_STATUS(hipDeviceCanAccessPeer(&can,0,1));
    printf("peer-to-peer access%d",can);
    size_t size = 1024 * sizeof(float);
    CHECK_STATUS(hipSetDevice(0)); // select device 0
    float* p0;
    CHECK_STATUS(hipMalloc(&p0, size));// allocate device memory on device 0
    CHECK_STATUS(hipSetDevice(1)); // select device 1
    float* p1;
    CHECK_STATUS(hipMalloc(&p1, size));// allocate device memory on device 1
    CHECK_STATUS(hipSetDevice(0)); // back to device 0
    hipLaunchKernelGGL(( MyKernel), dim3(1000), dim3(128), 0, 0, p0); // run MyKernel on device 0
    CHECK_STATUS(hipGetLastError());
    CHECK_STATUS(hipSetDevice(1)); // select device 1
    // hipMemcpyPeer:
    // 1. runs only after all previously issued work has completed,
    // 2. later commands execute only after the copy finishes.
    CHECK_STATUS(hipMemcpyPeer(p1, 1, p0, 0, size)); // copy p0 (dev 0) into p1 (dev 1)
    hipLaunchKernelGGL(( MyKernel), dim3(1000), dim3(128), 0, 0, p1); // run MyKernel on device 1
    CHECK_STATUS(hipGetLastError());
    CHECK_STATUS(hipFree(p0));
    CHECK_STATUS(hipFree(p1));
    return 0;
}
| 93aba9d2e3af00885cb1f85d771488f367cfb591.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#ifndef NDEBUG
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
#else
#define CHECK_STATUS(status) status
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////
// Empty placeholder kernel; exists only to demonstrate launching work on
// each device in this multi-GPU example.
__global__ void MyKernel(float* data){
}
// Multi-GPU demo: allocate a buffer on device 0 and device 1, launch a kernel
// on each, and copy the device-0 buffer to device 1 with cudaMemcpyPeer.
// (Comments translated from Chinese; the printed string is left unchanged.)
int main(int argc, char **argv) {
    int can;
    // Query whether device 0 can directly access device 1's memory;
    // the result is printed but the code proceeds either way.
    CHECK_STATUS(cudaDeviceCanAccessPeer(&can,0,1));
    printf("是否支持peer-to-peer access:%d",can);
    size_t size = 1024 * sizeof(float);
    CHECK_STATUS(cudaSetDevice(0)); // select device 0
    float* p0;
    CHECK_STATUS(cudaMalloc(&p0, size));// allocate device memory on device 0
    CHECK_STATUS(cudaSetDevice(1)); // select device 1
    float* p1;
    CHECK_STATUS(cudaMalloc(&p1, size));// allocate device memory on device 1
    CHECK_STATUS(cudaSetDevice(0)); // select device 0
    MyKernel<<<1000, 128>>>(p0); // run MyKernel on device 0
    CHECK_STATUS(cudaGetLastError());
    CHECK_STATUS(cudaSetDevice(1)); // select device 1
    // cudaMemcpyPeer:
    // 1. runs only after all previously issued work has completed,
    // 2. later commands execute only after the copy finishes.
    CHECK_STATUS(cudaMemcpyPeer(p1, 1, p0, 0, size)); // copy p0 into p1
    MyKernel<<<1000, 128>>>(p1); // run MyKernel on device 1
    CHECK_STATUS(cudaGetLastError());
    CHECK_STATUS(cudaFree(p0));
    CHECK_STATUS(cudaFree(p1));
    return 0;
}
|
08291f79b9b2866e9063733784745375a3b05917.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <math.h>
#include <cmath>
#include <stdio.h>
#include <string.h>
#include "Gaussian.h"
const int TILE_SIZE = 32;
bool checkForError(hipError_t error, char* funcName);
// Normalizes the pivot row: divides every element of row `pivot` by the
// diagonal element in[pivot][pivot] and writes the diagonal element as 1.
// Rows other than the pivot row are NOT written (the else branch is commented
// out), so the caller must pass an `out` buffer that already holds a copy of
// `in` — the host code uploads the matrix into both buffers.
__global__ void ScaleRowKernel(float* in, float* out, unsigned int numRows, unsigned int numCols, int pivot){
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    if (row < numRows && col < numCols){
        // The pivot element used for scaling is read from the input matrix.
        if (row == pivot && col != pivot) out[row*numCols + col] = in[row*numCols + col] / in[pivot*numCols + pivot];
        // The diagonal element divided by itself is 1 by definition.
        if (row == pivot && col == pivot){ out[pivot*numCols + pivot] = 1; }
        //else out[row*numCols + col] = in[row*numCols + col];
    }
}
// Eliminates column `pivot` from every non-pivot row:
// out[row][col] = in[row][col] - in[row][pivot] * in[pivot][col].
// The pivot row is copied through unchanged. Expects the pivot row of `in`
// to have already been normalized by ScaleRowKernel (see the host loop).
__global__ void SubtractionKernel(float* in, float* out, unsigned int numRows, unsigned int numCols, int pivot){
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    if (row < numRows && col < numCols){
        if (row != pivot){
            out[row*numCols + col] = in[row*numCols + col] - (in[row*numCols + pivot] * in[pivot*numCols + col]);
        }
        else out[row*numCols + col] = in[pivot*numCols + col];
    }
}
// Runs Gauss-Jordan elimination on the GPU: for each pivot row,
// ScaleRowKernel normalizes it and SubtractionKernel eliminates the pivot
// column from all other rows. `matrix`/`outputMatrix` are host arrays of row
// pointers. Returns true on success, false as soon as any device call fails.
// NOTE(review): `partialPivot` and `scaleFactor` are never used — no pivoting
// is performed, so a zero on the diagonal causes a division by zero.
bool GaussianEliminationGPU(float** matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float** outputMatrix, bool partialPivot){
    hipError_t status;
    bool error;  // NOTE(review): despite the name, holds "success" (true = OK)
    int bytes = numberOfRows * numberOfColumns * sizeof(float);
    float scaleFactor = 0;
    float* out;
    float* in;
    // Initializing the values
    hipMalloc((void**)&out, bytes);
    hipMalloc((void**)&in, bytes);
    status = hipGetLastError();
    error = checkForError(status, "At hipMalloc");
    if (!error){
        hipFree(out);
        hipFree(in);
        return false;
    }
    // Upload the matrix row by row into BOTH buffers: the kernels only write
    // the rows they touch, so `out` must start as a copy of `in`.
    for (int r = 0; r<numberOfRows; r++){
        hipMemcpy(&out[r*numberOfColumns], matrix[r], numberOfColumns*sizeof(float), hipMemcpyHostToDevice);
        hipMemcpy(&in[r*numberOfColumns], matrix[r], numberOfColumns*sizeof(float), hipMemcpyHostToDevice);
    }
    status = hipGetLastError();
    error = checkForError(status, "At hipMemcpy");
    if (!error){
        hipFree(out);
        hipFree(in);
        return false;
    }
    // Defining Grid and block
    dim3 dimBlock(TILE_SIZE, TILE_SIZE); // 2D block
    status = hipGetLastError();
    error = checkForError(status, "At dimBlock");
    if (!error){
        hipFree(out);
        hipFree(in);
        return false;
    }
    // 2D grid, rounded up so every matrix element gets a thread.
    int gridSize1 = (int)ceil((((float)numberOfColumns) / (TILE_SIZE)));
    int gridSize2 = (int)ceil((((float)numberOfRows) / (TILE_SIZE)));
    dim3 dimGrid(gridSize1, gridSize2);
    status = hipGetLastError();
    error = checkForError(status, "At dimGrid");
    if (!error){
        hipFree(out);
        hipFree(in);
        return false;
    }
    // Scale each pivot row, then perform elementary row-wise operations.
    for (int p = 0; p<numberOfRows; p++){ // p is the pivot row
        ScaleRowKernel << < dimGrid, dimBlock >> >(in, out, numberOfRows, numberOfColumns, p);
        hipDeviceSynchronize();
        hipDeviceSynchronize();  // NOTE(review): second sync is redundant
        status = hipGetLastError();
        error = checkForError(status, "At ScaleRowKernel");
        if (!error){
            hipFree(out);
            hipFree(in);
            return false;
        }
        // Copy the output back to the input so the next kernel sees the
        // normalized pivot row.
        hipMemcpy(in, out, numberOfRows*numberOfColumns*sizeof(float), hipMemcpyDeviceToDevice);
        SubtractionKernel << < dimGrid, dimBlock >> >(in, out, numberOfRows, numberOfColumns, p);
        hipDeviceSynchronize();
        hipDeviceSynchronize();
        status = hipGetLastError();
        error = checkForError(status, "At SubtractionKernel");
        if (!error){
            printf("Error at Subtraction kernel row number : %d", p);
            hipFree(out);
            hipFree(in);
            return false;
        }
        // Refresh the input matrix before the next pivot iteration.
        hipMemcpy(in, out, numberOfRows*numberOfColumns*sizeof(float), hipMemcpyDeviceToDevice);
        hipDeviceSynchronize();
        hipDeviceSynchronize();
    }
    // Download the result row by row into the host output matrix.
    for (int r = 0; r<numberOfRows; r++){
        hipMemcpy(outputMatrix[r], &out[r*numberOfColumns],
            numberOfColumns*sizeof(float), hipMemcpyDeviceToHost);
    }
    status = hipGetLastError();
    error = checkForError(status, "After cudaMemcopy to host");
    if (!error){
        hipFree(out);
        hipFree(in);
        return false;
    }
    hipFree(out);
    hipFree(in);
    return true;
}
// Reports whether a HIP API/kernel call succeeded. On failure prints the
// call-site label and the human-readable error string.
// Returns true on success, false on error (callers test `if (!ok) ...`).
bool checkForError(hipError_t error, char* funcName){
    bool success;
    if (error != hipSuccess){
        success = false;
        printf("Device error at %s \n", funcName);
        // Bug fix: the original passed the function pointer hipGetErrorString
        // to printf instead of calling it, printing garbage instead of the
        // actual error message.
        printf("%s\n", hipGetErrorString(error));
    }
    else{
        success = true;
    }
    return success;
}
| 08291f79b9b2866e9063733784745375a3b05917.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <math.h>
#include <cmath>
#include <stdio.h>
#include <string.h>
#include "Gaussian.h"
const int TILE_SIZE = 32;
bool checkForError(cudaError_t error, char* funcName);
// Normalizes the pivot row: divides every element of row `pivot` by the
// diagonal element in[pivot][pivot] and writes the diagonal element as 1.
// Rows other than the pivot row are NOT written (the else branch is commented
// out), so the caller must pass an `out` buffer that already holds a copy of
// `in` — the host code uploads the matrix into both buffers.
__global__ void ScaleRowKernel(float* in, float* out, unsigned int numRows, unsigned int numCols, int pivot){
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    if (row < numRows && col < numCols){
        // The pivot element used for scaling is read from the input matrix.
        if (row == pivot && col != pivot) out[row*numCols + col] = in[row*numCols + col] / in[pivot*numCols + pivot];
        // The diagonal element divided by itself is 1 by definition.
        if (row == pivot && col == pivot){ out[pivot*numCols + pivot] = 1; }
        //else out[row*numCols + col] = in[row*numCols + col];
    }
}
// Eliminates column `pivot` from every non-pivot row:
// out[row][col] = in[row][col] - in[row][pivot] * in[pivot][col].
// The pivot row is copied through unchanged. Expects the pivot row of `in`
// to have already been normalized by ScaleRowKernel (see the host loop).
__global__ void SubtractionKernel(float* in, float* out, unsigned int numRows, unsigned int numCols, int pivot){
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    if (row < numRows && col < numCols){
        if (row != pivot){
            out[row*numCols + col] = in[row*numCols + col] - (in[row*numCols + pivot] * in[pivot*numCols + col]);
        }
        else out[row*numCols + col] = in[pivot*numCols + col];
    }
}
// Runs Gauss-Jordan elimination on the GPU: for each pivot row,
// ScaleRowKernel normalizes it and SubtractionKernel eliminates the pivot
// column from all other rows. `matrix`/`outputMatrix` are host arrays of row
// pointers. Returns true on success, false as soon as any device call fails.
// NOTE(review): `partialPivot` and `scaleFactor` are never used — no pivoting
// is performed, so a zero on the diagonal causes a division by zero.
bool GaussianEliminationGPU(float** matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float** outputMatrix, bool partialPivot){
    cudaError_t status;
    bool error;  // NOTE(review): despite the name, holds "success" (true = OK)
    int bytes = numberOfRows * numberOfColumns * sizeof(float);
    float scaleFactor = 0;
    float* out;
    float* in;
    // Initializing the values
    cudaMalloc((void**)&out, bytes);
    cudaMalloc((void**)&in, bytes);
    status = cudaGetLastError();
    error = checkForError(status, "At cudaMalloc");
    if (!error){
        cudaFree(out);
        cudaFree(in);
        return false;
    }
    // Upload the matrix row by row into BOTH buffers: the kernels only write
    // the rows they touch, so `out` must start as a copy of `in`.
    for (int r = 0; r<numberOfRows; r++){
        cudaMemcpy(&out[r*numberOfColumns], matrix[r], numberOfColumns*sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(&in[r*numberOfColumns], matrix[r], numberOfColumns*sizeof(float), cudaMemcpyHostToDevice);
    }
    status = cudaGetLastError();
    error = checkForError(status, "At cudaMemcpy");
    if (!error){
        cudaFree(out);
        cudaFree(in);
        return false;
    }
    // Defining Grid and block
    dim3 dimBlock(TILE_SIZE, TILE_SIZE); // 2D block
    status = cudaGetLastError();
    error = checkForError(status, "At dimBlock");
    if (!error){
        cudaFree(out);
        cudaFree(in);
        return false;
    }
    // 2D grid, rounded up so every matrix element gets a thread.
    int gridSize1 = (int)ceil((((float)numberOfColumns) / (TILE_SIZE)));
    int gridSize2 = (int)ceil((((float)numberOfRows) / (TILE_SIZE)));
    dim3 dimGrid(gridSize1, gridSize2);
    status = cudaGetLastError();
    error = checkForError(status, "At dimGrid");
    if (!error){
        cudaFree(out);
        cudaFree(in);
        return false;
    }
    // Scale each pivot row, then perform elementary row-wise operations.
    for (int p = 0; p<numberOfRows; p++){ // p is the pivot row
        ScaleRowKernel << < dimGrid, dimBlock >> >(in, out, numberOfRows, numberOfColumns, p);
        cudaDeviceSynchronize();
        cudaThreadSynchronize();  // NOTE(review): deprecated alias of cudaDeviceSynchronize; redundant here
        status = cudaGetLastError();
        error = checkForError(status, "At ScaleRowKernel");
        if (!error){
            cudaFree(out);
            cudaFree(in);
            return false;
        }
        // Copy the output back to the input so the next kernel sees the
        // normalized pivot row.
        cudaMemcpy(in, out, numberOfRows*numberOfColumns*sizeof(float), cudaMemcpyDeviceToDevice);
        SubtractionKernel << < dimGrid, dimBlock >> >(in, out, numberOfRows, numberOfColumns, p);
        cudaDeviceSynchronize();
        cudaThreadSynchronize();
        status = cudaGetLastError();
        error = checkForError(status, "At SubtractionKernel");
        if (!error){
            printf("Error at Subtraction kernel row number : %d", p);
            cudaFree(out);
            cudaFree(in);
            return false;
        }
        // Refresh the input matrix before the next pivot iteration.
        cudaMemcpy(in, out, numberOfRows*numberOfColumns*sizeof(float), cudaMemcpyDeviceToDevice);
        cudaDeviceSynchronize();
        cudaThreadSynchronize();
    }
    // Download the result row by row into the host output matrix.
    for (int r = 0; r<numberOfRows; r++){
        cudaMemcpy(outputMatrix[r], &out[r*numberOfColumns],
            numberOfColumns*sizeof(float), cudaMemcpyDeviceToHost);
    }
    status = cudaGetLastError();
    error = checkForError(status, "After cudaMemcopy to host");
    if (!error){
        cudaFree(out);
        cudaFree(in);
        return false;
    }
    cudaFree(out);
    cudaFree(in);
    return true;
}
// Reports whether a CUDA API/kernel call succeeded. On failure prints the
// call-site label and the human-readable error string.
// Returns true on success, false on error (callers test `if (!ok) ...`).
bool checkForError(cudaError_t error, char* funcName){
    bool success;
    if (error != cudaSuccess){
        success = false;
        printf("Device error at %s \n", funcName);
        // Bug fix: the original passed the function pointer cudaGetErrorString
        // to printf instead of calling it, printing garbage instead of the
        // actual error message.
        printf("%s\n", cudaGetErrorString(error));
    }
    else{
        success = true;
    }
    return success;
}
|
e92ab2dc8235ce637c55903956a5e004a23c3998.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0
#include "TestEm3.cuh"
#include <AdePT/LoopNavigator.h>
#include <fieldPropagatorConstBz.h>
#include <CopCore/PhysicalConstants.h>
#include <G4HepEmElectronManager.hh>
#include <G4HepEmElectronTrack.hh>
#include <G4HepEmElectronInteractionBrem.hh>
#include <G4HepEmElectronInteractionIoni.hh>
#include <G4HepEmPositronInteractionAnnihilation.hh>
// Pull in implementation.
#include <G4HepEmRunUtils.icc>
#include <G4HepEmInteractionUtils.icc>
#include <G4HepEmElectronManager.icc>
#include <G4HepEmElectronInteractionBrem.icc>
#include <G4HepEmElectronInteractionIoni.icc>
#include <G4HepEmPositronInteractionAnnihilation.icc>
__device__ struct G4HepEmElectronManager electronManager;
// Compute the physics and geometry step limit, transport the electrons while
// applying the continuous effects and maybe a discrete process that could
// generate secondaries.
// Fix: two identifiers in this block were corrupted by an HTML-entity
// mangling of "&curren..." into "¤..." ("Track ¤tTrack" and
// "rnge(¤tTrack.rngState)"); both restored to "&currentTrack".
template <bool IsElectron>
static __device__ __forceinline__ void TransportElectrons(Track *electrons, const adept::MParray *active,
                                                          Secondaries &secondaries, adept::MParray *activeQueue,
                                                          adept::MParray *relocateQueue, GlobalScoring *globalScoring,
                                                          ScoringPerVolume *scoringPerVolume)
{
  constexpr int Charge  = IsElectron ? -1 : 1;
  constexpr double Mass = copcore::units::kElectronMassC2;
  fieldPropagatorConstBz fieldPropagatorBz(BzFieldValue);

  // Grid-stride loop over the queue of active track slots.
  int activeSize = active->size();
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) {
    const int slot      = (*active)[i];
    Track &currentTrack = electrons[slot];
    auto volume         = currentTrack.currentState.Top();
    if (volume == nullptr) {
      // The particle left the world, kill it by not enqueuing into activeQueue.
      continue;
    }
    int volumeID   = volume->id();
    int theMCIndex = MCIndex[volumeID];

    // Init a track with the needed data to call into G4HepEm.
    G4HepEmElectronTrack elTrack;
    G4HepEmTrack *theTrack = elTrack.GetTrack();
    theTrack->SetEKin(currentTrack.energy);
    theTrack->SetMCIndex(theMCIndex);
    theTrack->SetCharge(Charge);

    // Sample the `number-of-interaction-left` and put it into the track.
    for (int ip = 0; ip < 3; ++ip) {
      double numIALeft = currentTrack.numIALeft[ip];
      if (numIALeft <= 0) {
        numIALeft                  = -::log(currentTrack.Uniform());
        currentTrack.numIALeft[ip] = numIALeft;
      }
      theTrack->SetNumIALeft(numIALeft, ip);
    }

    // Call G4HepEm to compute the physics step limit.
    electronManager.HowFar(&g4HepEmData, &g4HepEmPars, &elTrack);

    // Get result into variables.
    double geometricalStepLengthFromPhysics = theTrack->GetGStepLength();
    // The phyiscal step length is the amount that the particle experiences
    // which might be longer than the geometrical step length due to MSC. As
    // long as we call PerformContinuous in the same kernel we don't need to
    // care, but we need to make this available when splitting the operations.
    // double physicalStepLength = elTrack.GetPStepLength();
    int winnerProcessIndex = theTrack->GetWinnerProcessIndex();
    // Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we
    // also need to carry them over!

    // Check if there's a volume boundary in between.
    double geometryStepLength;
    if (BzFieldValue != 0) {
      geometryStepLength = fieldPropagatorBz.ComputeStepAndPropagatedState</*Relocate=*/false>(
          currentTrack.energy, Mass, Charge, geometricalStepLengthFromPhysics, currentTrack.pos, currentTrack.dir,
          currentTrack.currentState, currentTrack.nextState);
    } else {
      geometryStepLength =
          LoopNavigator::ComputeStepAndNextVolume(currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics,
                                                  currentTrack.currentState, currentTrack.nextState);
      currentTrack.pos += (geometryStepLength + kPush) * currentTrack.dir;
    }
    atomicAdd(&globalScoring->chargedSteps, 1);
    atomicAdd(&scoringPerVolume->chargedTrackLength[volumeID], geometryStepLength);

    if (currentTrack.nextState.IsOnBoundary()) {
      theTrack->SetGStepLength(geometryStepLength);
      theTrack->SetOnBoundary(true);
    }

    // Apply continuous effects.
    bool stopped = electronManager.PerformContinuous(&g4HepEmData, &g4HepEmPars, &elTrack);
    // Collect the changes.
    currentTrack.energy  = theTrack->GetEKin();
    double energyDeposit = theTrack->GetEnergyDeposit();
    atomicAdd(&globalScoring->energyDeposit, energyDeposit);
    atomicAdd(&scoringPerVolume->energyDeposit[volumeID], energyDeposit);

    // Save the `number-of-interaction-left` in our track.
    for (int ip = 0; ip < 3; ++ip) {
      double numIALeft           = theTrack->GetNumIALeft(ip);
      currentTrack.numIALeft[ip] = numIALeft;
    }

    if (stopped) {
      if (!IsElectron) {
        // Annihilate the stopped positron into two gammas heading to opposite
        // directions (isotropic).
        Track &gamma1 = secondaries.gammas.NextTrack();
        Track &gamma2 = secondaries.gammas.NextTrack();
        atomicAdd(&globalScoring->numGammas, 2);

        const double cost = 2 * currentTrack.Uniform() - 1;
        const double sint = sqrt(1 - cost * cost);
        const double phi  = k2Pi * currentTrack.Uniform();
        double sinPhi, cosPhi;
        sincos(phi, &sinPhi, &cosPhi);

        gamma1.InitAsSecondary(/*parent=*/currentTrack);
        gamma1.energy = copcore::units::kElectronMassC2;
        gamma1.dir.Set(sint * cosPhi, sint * sinPhi, cost);

        gamma2.InitAsSecondary(/*parent=*/currentTrack);
        gamma2.energy = copcore::units::kElectronMassC2;
        gamma2.dir    = -gamma1.dir;
      }
      // Particles are killed by not enqueuing them into the new activeQueue.
      continue;
    }

    if (currentTrack.nextState.IsOnBoundary()) {
      // For now, just count that we hit something.
      atomicAdd(&globalScoring->hits, 1);

      activeQueue->push_back(slot);
      relocateQueue->push_back(slot);

      // Move to the next boundary.
      currentTrack.SwapStates();
      continue;
    } else if (winnerProcessIndex < 0) {
      // No discrete process, move on.
      activeQueue->push_back(slot);
      continue;
    }

    // Reset number of interaction left for the winner discrete process.
    // (Will be resampled in the next iteration.)
    currentTrack.numIALeft[winnerProcessIndex] = -1.0;

    // Check if a delta interaction happens instead of the real discrete process.
    if (electronManager.CheckDelta(&g4HepEmData, theTrack, currentTrack.Uniform())) {
      // A delta interaction happened, move on.
      activeQueue->push_back(slot);
      continue;
    }

    // Perform the discrete interaction.
    RanluxppDoubleEngine rnge(&currentTrack.rngState);
    const double energy   = currentTrack.energy;
    const double theElCut = g4HepEmData.fTheMatCutData->fMatCutData[theMCIndex].fSecElProdCutE;

    switch (winnerProcessIndex) {
    case 0: {
      // Invoke ionization (for e-/e+):
      double deltaEkin = (IsElectron) ? SampleETransferMoller(theElCut, energy, &rnge)
                                      : SampleETransferBhabha(theElCut, energy, &rnge);

      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double dirSecondary[3];
      SampleDirectionsIoni(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);

      Track &secondary = secondaries.electrons.NextTrack();
      atomicAdd(&globalScoring->numElectrons, 1);

      secondary.InitAsSecondary(/*parent=*/currentTrack);
      secondary.energy = deltaEkin;
      secondary.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);

      currentTrack.energy = energy - deltaEkin;
      currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);

      // The current track continues to live.
      activeQueue->push_back(slot);
      break;
    }
    case 1: {
      // Invoke model for Bremsstrahlung: either SB- or Rel-Brem.
      double logEnergy = ::log(energy);
      double deltaEkin = energy < g4HepEmPars.fElectronBremModelLim
                             ? SampleETransferBremSB(&g4HepEmData, energy, logEnergy, theMCIndex, &rnge, IsElectron)
                             : SampleETransferBremRB(&g4HepEmData, energy, logEnergy, theMCIndex, &rnge, IsElectron);

      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double dirSecondary[3];
      SampleDirectionsBrem(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);

      Track &gamma = secondaries.gammas.NextTrack();
      atomicAdd(&globalScoring->numGammas, 1);

      gamma.InitAsSecondary(/*parent=*/currentTrack);
      gamma.energy = deltaEkin;
      gamma.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);

      currentTrack.energy = energy - deltaEkin;
      currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);

      // The current track continues to live.
      activeQueue->push_back(slot);
      break;
    }
    case 2: {
      // Invoke annihilation (in-flight) for e+
      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double theGamma1Ekin, theGamma2Ekin;
      double theGamma1Dir[3], theGamma2Dir[3];
      SampleEnergyAndDirectionsForAnnihilationInFlight(energy, dirPrimary, &theGamma1Ekin, theGamma1Dir, &theGamma2Ekin,
                                                       theGamma2Dir, &rnge);

      Track &gamma1 = secondaries.gammas.NextTrack();
      Track &gamma2 = secondaries.gammas.NextTrack();
      atomicAdd(&globalScoring->numGammas, 2);

      gamma1.InitAsSecondary(/*parent=*/currentTrack);
      gamma1.energy = theGamma1Ekin;
      gamma1.dir.Set(theGamma1Dir[0], theGamma1Dir[1], theGamma1Dir[2]);

      gamma2.InitAsSecondary(/*parent=*/currentTrack);
      gamma2.energy = theGamma2Ekin;
      gamma2.dir.Set(theGamma2Dir[0], theGamma2Dir[1], theGamma2Dir[2]);

      // The current track is killed by not enqueuing into the next activeQueue.
      break;
    }
    }
  }
}
// Instantiate kernels for electrons and positrons.
__global__ void TransportElectrons(Track *electrons, const adept::MParray *active, Secondaries secondaries,
adept::MParray *activeQueue, adept::MParray *relocateQueue,
GlobalScoring *globalScoring, ScoringPerVolume *scoringPerVolume)
{
TransportElectrons</*IsElectron*/ true>(electrons, active, secondaries, activeQueue, relocateQueue, globalScoring,
scoringPerVolume);
}
__global__ void TransportPositrons(Track *positrons, const adept::MParray *active, Secondaries secondaries,
adept::MParray *activeQueue, adept::MParray *relocateQueue,
GlobalScoring *globalScoring, ScoringPerVolume *scoringPerVolume)
{
TransportElectrons</*IsElectron*/ false>(positrons, active, secondaries, activeQueue, relocateQueue, globalScoring,
scoringPerVolume);
}
| e92ab2dc8235ce637c55903956a5e004a23c3998.cu | // SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0
#include "TestEm3.cuh"
#include <AdePT/LoopNavigator.h>
#include <fieldPropagatorConstBz.h>
#include <CopCore/PhysicalConstants.h>
#include <G4HepEmElectronManager.hh>
#include <G4HepEmElectronTrack.hh>
#include <G4HepEmElectronInteractionBrem.hh>
#include <G4HepEmElectronInteractionIoni.hh>
#include <G4HepEmPositronInteractionAnnihilation.hh>
// Pull in implementation.
#include <G4HepEmRunUtils.icc>
#include <G4HepEmInteractionUtils.icc>
#include <G4HepEmElectronManager.icc>
#include <G4HepEmElectronInteractionBrem.icc>
#include <G4HepEmElectronInteractionIoni.icc>
#include <G4HepEmPositronInteractionAnnihilation.icc>
__device__ struct G4HepEmElectronManager electronManager;
// Compute the physics and geometry step limit, transport the electrons while
// applying the continuous effects and maybe a discrete process that could
// generate secondaries.
template <bool IsElectron>
static __device__ __forceinline__ void TransportElectrons(Track *electrons, const adept::MParray *active,
Secondaries &secondaries, adept::MParray *activeQueue,
adept::MParray *relocateQueue, GlobalScoring *globalScoring,
ScoringPerVolume *scoringPerVolume)
{
constexpr int Charge = IsElectron ? -1 : 1;
constexpr double Mass = copcore::units::kElectronMassC2;
fieldPropagatorConstBz fieldPropagatorBz(BzFieldValue);
int activeSize = active->size();
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) {
const int slot = (*active)[i];
Track ¤tTrack = electrons[slot];
auto volume = currentTrack.currentState.Top();
if (volume == nullptr) {
// The particle left the world, kill it by not enqueuing into activeQueue.
continue;
}
int volumeID = volume->id();
int theMCIndex = MCIndex[volumeID];
// Init a track with the needed data to call into G4HepEm.
G4HepEmElectronTrack elTrack;
G4HepEmTrack *theTrack = elTrack.GetTrack();
theTrack->SetEKin(currentTrack.energy);
theTrack->SetMCIndex(theMCIndex);
theTrack->SetCharge(Charge);
// Sample the `number-of-interaction-left` and put it into the track.
for (int ip = 0; ip < 3; ++ip) {
double numIALeft = currentTrack.numIALeft[ip];
if (numIALeft <= 0) {
numIALeft = -std::log(currentTrack.Uniform());
currentTrack.numIALeft[ip] = numIALeft;
}
theTrack->SetNumIALeft(numIALeft, ip);
}
// Call G4HepEm to compute the physics step limit.
electronManager.HowFar(&g4HepEmData, &g4HepEmPars, &elTrack);
// Get result into variables.
double geometricalStepLengthFromPhysics = theTrack->GetGStepLength();
// The phyiscal step length is the amount that the particle experiences
// which might be longer than the geometrical step length due to MSC. As
// long as we call PerformContinuous in the same kernel we don't need to
// care, but we need to make this available when splitting the operations.
// double physicalStepLength = elTrack.GetPStepLength();
int winnerProcessIndex = theTrack->GetWinnerProcessIndex();
// Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we
// also need to carry them over!
// Check if there's a volume boundary in between.
double geometryStepLength;
if (BzFieldValue != 0) {
geometryStepLength = fieldPropagatorBz.ComputeStepAndPropagatedState</*Relocate=*/false>(
currentTrack.energy, Mass, Charge, geometricalStepLengthFromPhysics, currentTrack.pos, currentTrack.dir,
currentTrack.currentState, currentTrack.nextState);
} else {
geometryStepLength =
LoopNavigator::ComputeStepAndNextVolume(currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics,
currentTrack.currentState, currentTrack.nextState);
currentTrack.pos += (geometryStepLength + kPush) * currentTrack.dir;
}
atomicAdd(&globalScoring->chargedSteps, 1);
atomicAdd(&scoringPerVolume->chargedTrackLength[volumeID], geometryStepLength);
if (currentTrack.nextState.IsOnBoundary()) {
theTrack->SetGStepLength(geometryStepLength);
theTrack->SetOnBoundary(true);
}
// Apply continuous effects.
bool stopped = electronManager.PerformContinuous(&g4HepEmData, &g4HepEmPars, &elTrack);
// Collect the changes.
currentTrack.energy = theTrack->GetEKin();
double energyDeposit = theTrack->GetEnergyDeposit();
atomicAdd(&globalScoring->energyDeposit, energyDeposit);
atomicAdd(&scoringPerVolume->energyDeposit[volumeID], energyDeposit);
// Save the `number-of-interaction-left` in our track.
for (int ip = 0; ip < 3; ++ip) {
double numIALeft = theTrack->GetNumIALeft(ip);
currentTrack.numIALeft[ip] = numIALeft;
}
if (stopped) {
if (!IsElectron) {
// Annihilate the stopped positron into two gammas heading to opposite
// directions (isotropic).
Track &gamma1 = secondaries.gammas.NextTrack();
Track &gamma2 = secondaries.gammas.NextTrack();
atomicAdd(&globalScoring->numGammas, 2);
const double cost = 2 * currentTrack.Uniform() - 1;
const double sint = sqrt(1 - cost * cost);
const double phi = k2Pi * currentTrack.Uniform();
double sinPhi, cosPhi;
sincos(phi, &sinPhi, &cosPhi);
gamma1.InitAsSecondary(/*parent=*/currentTrack);
gamma1.energy = copcore::units::kElectronMassC2;
gamma1.dir.Set(sint * cosPhi, sint * sinPhi, cost);
gamma2.InitAsSecondary(/*parent=*/currentTrack);
gamma2.energy = copcore::units::kElectronMassC2;
gamma2.dir = -gamma1.dir;
}
// Particles are killed by not enqueuing them into the new activeQueue.
continue;
}
if (currentTrack.nextState.IsOnBoundary()) {
// For now, just count that we hit something.
atomicAdd(&globalScoring->hits, 1);
activeQueue->push_back(slot);
relocateQueue->push_back(slot);
// Move to the next boundary.
currentTrack.SwapStates();
continue;
} else if (winnerProcessIndex < 0) {
// No discrete process, move on.
activeQueue->push_back(slot);
continue;
}
// Reset number of interaction left for the winner discrete process.
// (Will be resampled in the next iteration.)
currentTrack.numIALeft[winnerProcessIndex] = -1.0;
// Check if a delta interaction happens instead of the real discrete process.
if (electronManager.CheckDelta(&g4HepEmData, theTrack, currentTrack.Uniform())) {
// A delta interaction happened, move on.
activeQueue->push_back(slot);
continue;
}
// Perform the discrete interaction.
RanluxppDoubleEngine rnge(¤tTrack.rngState);
const double energy = currentTrack.energy;
const double theElCut = g4HepEmData.fTheMatCutData->fMatCutData[theMCIndex].fSecElProdCutE;
switch (winnerProcessIndex) {
case 0: {
// Invoke ionization (for e-/e+):
double deltaEkin = (IsElectron) ? SampleETransferMoller(theElCut, energy, &rnge)
: SampleETransferBhabha(theElCut, energy, &rnge);
double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
double dirSecondary[3];
SampleDirectionsIoni(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);
Track &secondary = secondaries.electrons.NextTrack();
atomicAdd(&globalScoring->numElectrons, 1);
secondary.InitAsSecondary(/*parent=*/currentTrack);
secondary.energy = deltaEkin;
secondary.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);
currentTrack.energy = energy - deltaEkin;
currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
// The current track continues to live.
activeQueue->push_back(slot);
break;
}
case 1: {
// Invoke model for Bremsstrahlung: either SB- or Rel-Brem.
double logEnergy = std::log(energy);
double deltaEkin = energy < g4HepEmPars.fElectronBremModelLim
? SampleETransferBremSB(&g4HepEmData, energy, logEnergy, theMCIndex, &rnge, IsElectron)
: SampleETransferBremRB(&g4HepEmData, energy, logEnergy, theMCIndex, &rnge, IsElectron);
double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
double dirSecondary[3];
SampleDirectionsBrem(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);
Track &gamma = secondaries.gammas.NextTrack();
atomicAdd(&globalScoring->numGammas, 1);
gamma.InitAsSecondary(/*parent=*/currentTrack);
gamma.energy = deltaEkin;
gamma.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);
currentTrack.energy = energy - deltaEkin;
currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
// The current track continues to live.
activeQueue->push_back(slot);
break;
}
case 2: {
// Invoke annihilation (in-flight) for e+
double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
double theGamma1Ekin, theGamma2Ekin;
double theGamma1Dir[3], theGamma2Dir[3];
SampleEnergyAndDirectionsForAnnihilationInFlight(energy, dirPrimary, &theGamma1Ekin, theGamma1Dir, &theGamma2Ekin,
theGamma2Dir, &rnge);
Track &gamma1 = secondaries.gammas.NextTrack();
Track &gamma2 = secondaries.gammas.NextTrack();
atomicAdd(&globalScoring->numGammas, 2);
gamma1.InitAsSecondary(/*parent=*/currentTrack);
gamma1.energy = theGamma1Ekin;
gamma1.dir.Set(theGamma1Dir[0], theGamma1Dir[1], theGamma1Dir[2]);
gamma2.InitAsSecondary(/*parent=*/currentTrack);
gamma2.energy = theGamma2Ekin;
gamma2.dir.Set(theGamma2Dir[0], theGamma2Dir[1], theGamma2Dir[2]);
// The current track is killed by not enqueuing into the next activeQueue.
break;
}
}
}
}
// Instantiate kernels for electrons and positrons.
__global__ void TransportElectrons(Track *electrons, const adept::MParray *active, Secondaries secondaries,
adept::MParray *activeQueue, adept::MParray *relocateQueue,
GlobalScoring *globalScoring, ScoringPerVolume *scoringPerVolume)
{
TransportElectrons</*IsElectron*/ true>(electrons, active, secondaries, activeQueue, relocateQueue, globalScoring,
scoringPerVolume);
}
__global__ void TransportPositrons(Track *positrons, const adept::MParray *active, Secondaries secondaries,
adept::MParray *activeQueue, adept::MParray *relocateQueue,
GlobalScoring *globalScoring, ScoringPerVolume *scoringPerVolume)
{
TransportElectrons</*IsElectron*/ false>(positrons, active, secondaries, activeQueue, relocateQueue, globalScoring,
scoringPerVolume);
}
|
e652b8424467d3da6c8b1259a230bd8fd8f1df3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "wireless_src_pulse_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int step = 1;
double amp = 1;
double MAX_TIME = 1;
double TIME_STEP = 1;
int radius = 1;
int source_active = 1;
int src_x = 1;
int src_y = 1;
double *ua_gpu = NULL;
hipMalloc(&ua_gpu, XSIZE*YSIZE);
double *ub_gpu = NULL;
hipMalloc(&ub_gpu, XSIZE*YSIZE);
double *uc_gpu = NULL;
hipMalloc(&uc_gpu, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
wireless_src_pulse_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, step,amp,MAX_TIME,TIME_STEP,radius,source_active,src_x,src_y,ua_gpu,ub_gpu,uc_gpu);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
wireless_src_pulse_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, step,amp,MAX_TIME,TIME_STEP,radius,source_active,src_x,src_y,ua_gpu,ub_gpu,uc_gpu);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
wireless_src_pulse_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, step,amp,MAX_TIME,TIME_STEP,radius,source_active,src_x,src_y,ua_gpu,ub_gpu,uc_gpu);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e652b8424467d3da6c8b1259a230bd8fd8f1df3e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "wireless_src_pulse_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int step = 1;
double amp = 1;
double MAX_TIME = 1;
double TIME_STEP = 1;
int radius = 1;
int source_active = 1;
int src_x = 1;
int src_y = 1;
double *ua_gpu = NULL;
cudaMalloc(&ua_gpu, XSIZE*YSIZE);
double *ub_gpu = NULL;
cudaMalloc(&ub_gpu, XSIZE*YSIZE);
double *uc_gpu = NULL;
cudaMalloc(&uc_gpu, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
wireless_src_pulse_kernel<<<gridBlock,threadBlock>>>(step,amp,MAX_TIME,TIME_STEP,radius,source_active,src_x,src_y,ua_gpu,ub_gpu,uc_gpu);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
wireless_src_pulse_kernel<<<gridBlock,threadBlock>>>(step,amp,MAX_TIME,TIME_STEP,radius,source_active,src_x,src_y,ua_gpu,ub_gpu,uc_gpu);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
wireless_src_pulse_kernel<<<gridBlock,threadBlock>>>(step,amp,MAX_TIME,TIME_STEP,radius,source_active,src_x,src_y,ua_gpu,ub_gpu,uc_gpu);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3994cbcd8586732c36c3cc66ee9b38f193efe0a1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_7_;
double _t_4_;
double _t_5_;
double _t_2_;
double _t_0_;
double _t_10_;
double _t_11_;
double _t_16_;
double _t_17_;
double _t_15_;
double _t_26_;
double _t_23_;
double _t_24_;
double _t_21_;
double _t_29_;
double _t_30_;
double _t_35_;
double _t_36_;
double _t_34_;
double _t_46_;
double _t_43_;
double _t_44_;
double _t_41_;
double _t_49_;
double _t_50_;
double _t_55_;
double _t_56_;
double _t_54_;
double _t_65_;
double _t_62_;
double _t_63_;
double _t_60_;
double _t_68_;
double _t_69_;
double _t_74_;
double _t_75_;
double _t_73_;
double r1ic0jc0kc0 = r1[i][j][k];
double _t_86_;
double _t_83_;
double _t_84_;
double _t_81_;
double _t_90_;
double _t_91_;
double _t_95_;
double _t_96_;
double _t_104_;
double _t_101_;
double _t_102_;
double _t_108_;
double _t_109_;
double _t_113_;
double _t_114_;
double _t_79_;
double _t_123_;
double _t_120_;
double _t_121_;
double _t_118_;
double _t_127_;
double _t_128_;
double _t_132_;
double _t_133_;
double _t_141_;
double _t_138_;
double _t_139_;
double _t_145_;
double _t_146_;
double _t_150_;
double _t_151_;
double _t_159_;
double _t_160_;
double _t_157_;
double _t_155_;
double _t_164_;
double _t_165_;
double _t_171_;
double _t_172_;
double _t_169_;
double _t_176_;
double _t_177_;
double _t_184_;
double _t_185_;
double _t_182_;
double _t_189_;
double _t_190_;
double _t_196_;
double _t_197_;
double _t_194_;
double _t_201_;
double _t_202_;
_t_7_ = 2.0 * mu[i][j][k+2];
_t_7_ += la[i][j][k+2];
_t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
_t_5_ = c2 * u1[i+2][j][k+2];
_t_5_ -= c2 * u1[i-2][j][k+2];
_t_5_ += c1 * u1[i+1][j][k+2];
_t_5_ -= c1 * u1[i-1][j][k+2];
_t_2_ = strx[i] * _t_4_ * _t_5_;
_t_0_ = c2 * _t_2_ * stry[j];
_t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_11_ = c2 * u2[i+2][j][k+2];
_t_11_ -= c2 * u2[i-2][j][k+2];
_t_11_ += c1 * u2[i+1][j][k+2];
_t_11_ -= c1 * u2[i-1][j][k+2];
_t_0_ += c2 * _t_10_ * _t_11_;
_t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
_t_17_ = c2 * u3[i+2][j][k+2];
_t_17_ -= c2 * u3[i-2][j][k+2];
_t_17_ += c1 * u3[i+1][j][k+2];
_t_17_ -= c1 * u3[i-1][j][k+2];
_t_15_ = _t_16_ * _t_17_;
_t_0_ += c2 * _t_15_ * stry[j];
_t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
_t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
_t_24_ = c2 * u1[i+2][j][k-2];
_t_24_ -= c2 * u1[i-2][j][k-2];
_t_24_ += c1 * u1[i+1][j][k-2];
_t_24_ -= c1 * u1[i-1][j][k-2];
_t_21_ = strx[i] * _t_23_ * _t_24_;
_t_0_ += c2 * _t_21_ * stry[j];
_t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_30_ = c2 * u2[i+2][j][k-2];
_t_30_ -= c2 * u2[i-2][j][k-2];
_t_30_ += c1 * u2[i+1][j][k-2];
_t_30_ -= c1 * u2[i-1][j][k-2];
_t_0_ += c2 * _t_29_ * _t_30_;
_t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
_t_36_ = c2 * u3[i+2][j][k-2];
_t_36_ -= c2 * u3[i-2][j][k-2];
_t_36_ += c1 * u3[i+1][j][k-2];
_t_36_ -= c1 * u3[i-1][j][k-2];
_t_34_ = _t_35_ * _t_36_;
_t_0_ += c2 * _t_34_ * stry[j];
_t_46_ = 2.0 * mu[i][j][k+1];
_t_46_ += la[i][j][k+1];
_t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
_t_44_ = c2 * u1[i+2][j][k+1];
_t_44_ -= c2 * u1[i-2][j][k+1];
_t_44_ += c1 * u1[i+1][j][k+1];
_t_44_ -= c1 * u1[i-1][j][k+1];
_t_41_ = strx[i+2] * _t_43_ * _t_44_;
_t_0_ += c1 * _t_41_ * stry[j];
_t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_50_ = c2 * u2[i+2][j][k+1];
_t_50_ -= c2 * u2[i-2][j][k+1];
_t_50_ += c1 * u2[i+1][j][k+1];
_t_50_ -= c1 * u2[i-1][j][k+1];
_t_0_ += c1 * _t_49_ * _t_50_;
_t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
_t_56_ = c2 * u3[i+2][j][k+1];
_t_56_ -= c2 * u3[i-2][j][k+1];
_t_56_ += c1 * u3[i+1][j][k+1];
_t_56_ -= c1 * u3[i-1][j][k+1];
_t_54_ = _t_55_ * _t_56_;
_t_0_ += c1 * _t_54_ * stry[j];
_t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
_t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
_t_63_ = c2 * u1[i+2][j][k-1];
_t_63_ -= c2 * u1[i-2][j][k-1];
_t_63_ += c1 * u1[i+1][j][k-1];
_t_63_ -= c1 * u1[i-1][j][k-1];
_t_60_ = strx[i-2] * _t_62_ * _t_63_;
_t_0_ += c1 * _t_60_ * stry[j];
_t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_69_ = c2 * u2[i+2][j][k-1];
_t_69_ -= c2 * u2[i-2][j][k-1];
_t_69_ += c1 * u2[i+1][j][k-1];
_t_69_ -= c1 * u2[i-1][j][k-1];
_t_0_ += c1 * _t_68_ * _t_69_;
_t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
_t_75_ = c2 * u3[i+2][j][k-1];
_t_75_ -= c2 * u3[i-2][j][k-1];
_t_75_ += c1 * u3[i+1][j][k-1];
_t_75_ -= c1 * u3[i-1][j][k-1];
_t_73_ = _t_74_ * _t_75_;
_t_0_ += c1 * _t_73_ * stry[j];
r1ic0jc0kc0 += _t_0_;
_t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
_t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
_t_84_ = c2 * u1[i+2][j][k+2];
_t_84_ -= c2 * u1[i+2][j][k-2];
_t_84_ += c1 * u1[i+2][j][k+1];
_t_84_ -= c1 * u1[i+2][j][k-1];
_t_81_ = strx[i] * _t_83_ * _t_84_;
_t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
_t_91_ = c2 * u2[i+2][j][k+2];
_t_91_ -= c2 * u2[i+2][j][k-2];
_t_91_ += c1 * u2[i+2][j][k+1];
_t_91_ -= c1 * u2[i+2][j][k-1];
_t_81_ += stry[j] * _t_90_ * _t_91_;
_t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
_t_96_ = c2 * u3[i+2][j][k+2];
_t_96_ -= c2 * u3[i+2][j][k-2];
_t_96_ += c1 * u3[i+2][j][k+1];
_t_96_ -= c1 * u3[i+2][j][k-1];
_t_81_ += _t_95_ * _t_96_;
_t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
_t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
_t_102_ = c2 * u1[i-2][j][k+2];
_t_102_ -= c2 * u1[i-2][j][k-2];
_t_102_ += c1 * u1[i-2][j][k+1];
_t_102_ -= c1 * u1[i-2][j][k-1];
_t_81_ += strx[i] * _t_101_ * _t_102_;
_t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
_t_109_ = c2 * u2[i-2][j][k+2];
_t_109_ -= c2 * u2[i-2][j][k-2];
_t_109_ += c1 * u2[i-2][j][k+1];
_t_109_ -= c1 * u2[i-2][j][k-1];
_t_81_ += stry[j] * _t_108_ * _t_109_;
_t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
_t_114_ = c2 * u3[i-2][j][k+2];
_t_114_ -= c2 * u3[i-2][j][k-2];
_t_114_ += c1 * u3[i-2][j][k+1];
_t_114_ -= c1 * u3[i-2][j][k-1];
_t_81_ += _t_113_ * _t_114_;
_t_79_ = stry[j] * c2 * _t_81_;
_t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
_t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
_t_121_ = c2 * u1[i+1][j][k+2];
_t_121_ -= c2 * u1[i+1][j][k-2];
_t_121_ += c1 * u1[i+1][j][k+1];
_t_121_ -= c1 * u1[i+1][j][k-1];
_t_118_ = strx[i] * _t_120_ * _t_121_;
_t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
_t_128_ = c2 * u2[i+1][j][k+2];
_t_128_ -= c2 * u2[i+1][j][k-2];
_t_128_ += c1 * u2[i+1][j][k+1];
_t_128_ -= c1 * u2[i+1][j][k-1];
_t_118_ += stry[j] * _t_127_ * _t_128_;
_t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
_t_133_ = c2 * u3[i+1][j][k+2];
_t_133_ -= c2 * u3[i+1][j][k-2];
_t_133_ += c1 * u3[i+1][j][k+1];
_t_133_ -= c1 * u3[i+1][j][k-1];
_t_118_ += _t_132_ * _t_133_;
_t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
_t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
_t_139_ = c2 * u1[i-1][j][k+2];
_t_139_ -= c2 * u1[i-1][j][k-2];
_t_139_ += c1 * u1[i-1][j][k+1];
_t_139_ -= c1 * u1[i-1][j][k-1];
_t_118_ += strx[i] * _t_138_ * _t_139_;
_t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
_t_146_ = c2 * u2[i-1][j][k+2];
_t_146_ -= c2 * u2[i-1][j][k-2];
_t_146_ += c1 * u2[i-1][j][k+1];
_t_146_ -= c1 * u2[i-1][j][k-1];
_t_118_ += stry[j] * _t_145_ * _t_146_;
_t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
_t_151_ = c2 * u3[i-1][j][k+2];
_t_151_ -= c2 * u3[i-1][j][k-2];
_t_151_ += c1 * u3[i-1][j][k+1];
_t_151_ -= c1 * u3[i-1][j][k-1];
_t_118_ += _t_150_ * _t_151_;
_t_79_ += stry[j] * c1 * _t_118_;
r1ic0jc0kc0 += _t_79_;
_t_159_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_160_ = c2 * u1[i][j+2][k+2];
_t_160_ -= c2 * u1[i][j-2][k+2];
_t_160_ += c1 * u1[i][j+1][k+2];
_t_160_ -= c1 * u1[i][j-1][k+2];
_t_157_ = stry[j+2] * _t_159_ * _t_160_;
_t_155_ = c2 * _t_157_ * strx[i];
_t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
_t_165_ = c2 * u2[i][j+2][k+2];
_t_165_ -= c2 * u2[i][j-2][k+2];
_t_165_ += c1 * u2[i][j+1][k+2];
_t_165_ -= c1 * u2[i][j-1][k+2];
_t_155_ += c2 * _t_164_ * _t_165_;
_t_171_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_172_ = c2 * u1[i][j+2][k-2];
_t_172_ -= c2 * u1[i][j-2][k-2];
_t_172_ += c1 * u1[i][j+1][k-2];
_t_172_ -= c1 * u1[i][j-1][k-2];
_t_169_ = stry[j] * _t_171_ * _t_172_;
_t_155_ += c2 * _t_169_ * strx[i];
_t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
_t_177_ = c2 * u2[i][j+2][k-2];
_t_177_ -= c2 * u2[i][j-2][k-2];
_t_177_ += c1 * u2[i][j+1][k-2];
_t_177_ -= c1 * u2[i][j-1][k-2];
_t_155_ += c2 * _t_176_ * _t_177_;
_t_184_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_185_ = c2 * u1[i][j+2][k+1];
_t_185_ -= c2 * u1[i][j-2][k+1];
_t_185_ += c1 * u1[i][j+1][k+1];
_t_185_ -= c1 * u1[i][j-1][k+1];
_t_182_ = stry[j-2] * _t_184_ * _t_185_;
_t_155_ += c1 * _t_182_ * strx[i];
_t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
_t_190_ = c2 * u2[i][j+2][k+1];
_t_190_ -= c2 * u2[i][j-2][k+1];
_t_190_ += c1 * u2[i][j+1][k+1];
_t_190_ -= c1 * u2[i][j-1][k+1];
_t_155_ += c1 * _t_189_ * _t_190_;
_t_196_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_197_ = c2 * u1[i][j+2][k-1];
_t_197_ -= c2 * u1[i][j-2][k-1];
_t_197_ += c1 * u1[i][j+1][k-1];
_t_197_ -= c1 * u1[i][j-1][k-1];
_t_194_ = stry[j] * _t_196_ * _t_197_;
_t_155_ += c1 * _t_194_ * strx[i];
_t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
_t_202_ = c2 * u2[i][j+2][k-1];
_t_202_ -= c2 * u2[i][j-2][k-1];
_t_202_ += c1 * u2[i][j+1][k-1];
_t_202_ -= c1 * u2[i][j-1][k-1];
_t_155_ += c1 * _t_201_ * _t_202_;
r1ic0jc0kc0 += _t_155_;
r1[i][j][k] = r1ic0jc0kc0;
r1[i][j][k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i][j][k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
}
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
hipMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
hipMemcpy (r1, h_r1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u1;
hipMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
hipMemcpy (u1, h_u1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u2;
hipMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
hipMemcpy (u2, h_u2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u3;
hipMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
hipMemcpy (u3, h_u3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met1;
hipMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
hipMemcpy (met1, h_met1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met2;
hipMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
hipMemcpy (met2, h_met2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met3;
hipMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
hipMemcpy (met3, h_met3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met4;
hipMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
hipMemcpy (met4, h_met4, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
hipLaunchKernelGGL(( curvi) , dim3(gridconfig), dim3(blockconfig), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
hipMemcpy (h_r1, r1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
}
| 3994cbcd8586732c36c3cc66ee9b38f193efe0a1.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_7_;
double _t_4_;
double _t_5_;
double _t_2_;
double _t_0_;
double _t_10_;
double _t_11_;
double _t_16_;
double _t_17_;
double _t_15_;
double _t_26_;
double _t_23_;
double _t_24_;
double _t_21_;
double _t_29_;
double _t_30_;
double _t_35_;
double _t_36_;
double _t_34_;
double _t_46_;
double _t_43_;
double _t_44_;
double _t_41_;
double _t_49_;
double _t_50_;
double _t_55_;
double _t_56_;
double _t_54_;
double _t_65_;
double _t_62_;
double _t_63_;
double _t_60_;
double _t_68_;
double _t_69_;
double _t_74_;
double _t_75_;
double _t_73_;
double r1ic0jc0kc0 = r1[i][j][k];
double _t_86_;
double _t_83_;
double _t_84_;
double _t_81_;
double _t_90_;
double _t_91_;
double _t_95_;
double _t_96_;
double _t_104_;
double _t_101_;
double _t_102_;
double _t_108_;
double _t_109_;
double _t_113_;
double _t_114_;
double _t_79_;
double _t_123_;
double _t_120_;
double _t_121_;
double _t_118_;
double _t_127_;
double _t_128_;
double _t_132_;
double _t_133_;
double _t_141_;
double _t_138_;
double _t_139_;
double _t_145_;
double _t_146_;
double _t_150_;
double _t_151_;
double _t_159_;
double _t_160_;
double _t_157_;
double _t_155_;
double _t_164_;
double _t_165_;
double _t_171_;
double _t_172_;
double _t_169_;
double _t_176_;
double _t_177_;
double _t_184_;
double _t_185_;
double _t_182_;
double _t_189_;
double _t_190_;
double _t_196_;
double _t_197_;
double _t_194_;
double _t_201_;
double _t_202_;
_t_7_ = 2.0 * mu[i][j][k+2];
_t_7_ += la[i][j][k+2];
_t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
_t_5_ = c2 * u1[i+2][j][k+2];
_t_5_ -= c2 * u1[i-2][j][k+2];
_t_5_ += c1 * u1[i+1][j][k+2];
_t_5_ -= c1 * u1[i-1][j][k+2];
_t_2_ = strx[i] * _t_4_ * _t_5_;
_t_0_ = c2 * _t_2_ * stry[j];
_t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_11_ = c2 * u2[i+2][j][k+2];
_t_11_ -= c2 * u2[i-2][j][k+2];
_t_11_ += c1 * u2[i+1][j][k+2];
_t_11_ -= c1 * u2[i-1][j][k+2];
_t_0_ += c2 * _t_10_ * _t_11_;
_t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
_t_17_ = c2 * u3[i+2][j][k+2];
_t_17_ -= c2 * u3[i-2][j][k+2];
_t_17_ += c1 * u3[i+1][j][k+2];
_t_17_ -= c1 * u3[i-1][j][k+2];
_t_15_ = _t_16_ * _t_17_;
_t_0_ += c2 * _t_15_ * stry[j];
_t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
_t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
_t_24_ = c2 * u1[i+2][j][k-2];
_t_24_ -= c2 * u1[i-2][j][k-2];
_t_24_ += c1 * u1[i+1][j][k-2];
_t_24_ -= c1 * u1[i-1][j][k-2];
_t_21_ = strx[i] * _t_23_ * _t_24_;
_t_0_ += c2 * _t_21_ * stry[j];
_t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_30_ = c2 * u2[i+2][j][k-2];
_t_30_ -= c2 * u2[i-2][j][k-2];
_t_30_ += c1 * u2[i+1][j][k-2];
_t_30_ -= c1 * u2[i-1][j][k-2];
_t_0_ += c2 * _t_29_ * _t_30_;
_t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
_t_36_ = c2 * u3[i+2][j][k-2];
_t_36_ -= c2 * u3[i-2][j][k-2];
_t_36_ += c1 * u3[i+1][j][k-2];
_t_36_ -= c1 * u3[i-1][j][k-2];
_t_34_ = _t_35_ * _t_36_;
_t_0_ += c2 * _t_34_ * stry[j];
_t_46_ = 2.0 * mu[i][j][k+1];
_t_46_ += la[i][j][k+1];
_t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
_t_44_ = c2 * u1[i+2][j][k+1];
_t_44_ -= c2 * u1[i-2][j][k+1];
_t_44_ += c1 * u1[i+1][j][k+1];
_t_44_ -= c1 * u1[i-1][j][k+1];
_t_41_ = strx[i+2] * _t_43_ * _t_44_;
_t_0_ += c1 * _t_41_ * stry[j];
_t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_50_ = c2 * u2[i+2][j][k+1];
_t_50_ -= c2 * u2[i-2][j][k+1];
_t_50_ += c1 * u2[i+1][j][k+1];
_t_50_ -= c1 * u2[i-1][j][k+1];
_t_0_ += c1 * _t_49_ * _t_50_;
_t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
_t_56_ = c2 * u3[i+2][j][k+1];
_t_56_ -= c2 * u3[i-2][j][k+1];
_t_56_ += c1 * u3[i+1][j][k+1];
_t_56_ -= c1 * u3[i-1][j][k+1];
_t_54_ = _t_55_ * _t_56_;
_t_0_ += c1 * _t_54_ * stry[j];
_t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
_t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
_t_63_ = c2 * u1[i+2][j][k-1];
_t_63_ -= c2 * u1[i-2][j][k-1];
_t_63_ += c1 * u1[i+1][j][k-1];
_t_63_ -= c1 * u1[i-1][j][k-1];
_t_60_ = strx[i-2] * _t_62_ * _t_63_;
_t_0_ += c1 * _t_60_ * stry[j];
_t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_69_ = c2 * u2[i+2][j][k-1];
_t_69_ -= c2 * u2[i-2][j][k-1];
_t_69_ += c1 * u2[i+1][j][k-1];
_t_69_ -= c1 * u2[i-1][j][k-1];
_t_0_ += c1 * _t_68_ * _t_69_;
_t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
_t_75_ = c2 * u3[i+2][j][k-1];
_t_75_ -= c2 * u3[i-2][j][k-1];
_t_75_ += c1 * u3[i+1][j][k-1];
_t_75_ -= c1 * u3[i-1][j][k-1];
_t_73_ = _t_74_ * _t_75_;
_t_0_ += c1 * _t_73_ * stry[j];
r1ic0jc0kc0 += _t_0_;
_t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
_t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
_t_84_ = c2 * u1[i+2][j][k+2];
_t_84_ -= c2 * u1[i+2][j][k-2];
_t_84_ += c1 * u1[i+2][j][k+1];
_t_84_ -= c1 * u1[i+2][j][k-1];
_t_81_ = strx[i] * _t_83_ * _t_84_;
_t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
_t_91_ = c2 * u2[i+2][j][k+2];
_t_91_ -= c2 * u2[i+2][j][k-2];
_t_91_ += c1 * u2[i+2][j][k+1];
_t_91_ -= c1 * u2[i+2][j][k-1];
_t_81_ += stry[j] * _t_90_ * _t_91_;
_t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
_t_96_ = c2 * u3[i+2][j][k+2];
_t_96_ -= c2 * u3[i+2][j][k-2];
_t_96_ += c1 * u3[i+2][j][k+1];
_t_96_ -= c1 * u3[i+2][j][k-1];
_t_81_ += _t_95_ * _t_96_;
_t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
_t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
_t_102_ = c2 * u1[i-2][j][k+2];
_t_102_ -= c2 * u1[i-2][j][k-2];
_t_102_ += c1 * u1[i-2][j][k+1];
_t_102_ -= c1 * u1[i-2][j][k-1];
_t_81_ += strx[i] * _t_101_ * _t_102_;
_t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
_t_109_ = c2 * u2[i-2][j][k+2];
_t_109_ -= c2 * u2[i-2][j][k-2];
_t_109_ += c1 * u2[i-2][j][k+1];
_t_109_ -= c1 * u2[i-2][j][k-1];
_t_81_ += stry[j] * _t_108_ * _t_109_;
_t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
_t_114_ = c2 * u3[i-2][j][k+2];
_t_114_ -= c2 * u3[i-2][j][k-2];
_t_114_ += c1 * u3[i-2][j][k+1];
_t_114_ -= c1 * u3[i-2][j][k-1];
_t_81_ += _t_113_ * _t_114_;
_t_79_ = stry[j] * c2 * _t_81_;
_t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
_t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
_t_121_ = c2 * u1[i+1][j][k+2];
_t_121_ -= c2 * u1[i+1][j][k-2];
_t_121_ += c1 * u1[i+1][j][k+1];
_t_121_ -= c1 * u1[i+1][j][k-1];
_t_118_ = strx[i] * _t_120_ * _t_121_;
_t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
_t_128_ = c2 * u2[i+1][j][k+2];
_t_128_ -= c2 * u2[i+1][j][k-2];
_t_128_ += c1 * u2[i+1][j][k+1];
_t_128_ -= c1 * u2[i+1][j][k-1];
_t_118_ += stry[j] * _t_127_ * _t_128_;
_t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
_t_133_ = c2 * u3[i+1][j][k+2];
_t_133_ -= c2 * u3[i+1][j][k-2];
_t_133_ += c1 * u3[i+1][j][k+1];
_t_133_ -= c1 * u3[i+1][j][k-1];
_t_118_ += _t_132_ * _t_133_;
_t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
_t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
_t_139_ = c2 * u1[i-1][j][k+2];
_t_139_ -= c2 * u1[i-1][j][k-2];
_t_139_ += c1 * u1[i-1][j][k+1];
_t_139_ -= c1 * u1[i-1][j][k-1];
_t_118_ += strx[i] * _t_138_ * _t_139_;
_t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
_t_146_ = c2 * u2[i-1][j][k+2];
_t_146_ -= c2 * u2[i-1][j][k-2];
_t_146_ += c1 * u2[i-1][j][k+1];
_t_146_ -= c1 * u2[i-1][j][k-1];
_t_118_ += stry[j] * _t_145_ * _t_146_;
_t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
_t_151_ = c2 * u3[i-1][j][k+2];
_t_151_ -= c2 * u3[i-1][j][k-2];
_t_151_ += c1 * u3[i-1][j][k+1];
_t_151_ -= c1 * u3[i-1][j][k-1];
_t_118_ += _t_150_ * _t_151_;
_t_79_ += stry[j] * c1 * _t_118_;
r1ic0jc0kc0 += _t_79_;
_t_159_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_160_ = c2 * u1[i][j+2][k+2];
_t_160_ -= c2 * u1[i][j-2][k+2];
_t_160_ += c1 * u1[i][j+1][k+2];
_t_160_ -= c1 * u1[i][j-1][k+2];
_t_157_ = stry[j+2] * _t_159_ * _t_160_;
_t_155_ = c2 * _t_157_ * strx[i];
_t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
_t_165_ = c2 * u2[i][j+2][k+2];
_t_165_ -= c2 * u2[i][j-2][k+2];
_t_165_ += c1 * u2[i][j+1][k+2];
_t_165_ -= c1 * u2[i][j-1][k+2];
_t_155_ += c2 * _t_164_ * _t_165_;
_t_171_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_172_ = c2 * u1[i][j+2][k-2];
_t_172_ -= c2 * u1[i][j-2][k-2];
_t_172_ += c1 * u1[i][j+1][k-2];
_t_172_ -= c1 * u1[i][j-1][k-2];
_t_169_ = stry[j] * _t_171_ * _t_172_;
_t_155_ += c2 * _t_169_ * strx[i];
_t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
_t_177_ = c2 * u2[i][j+2][k-2];
_t_177_ -= c2 * u2[i][j-2][k-2];
_t_177_ += c1 * u2[i][j+1][k-2];
_t_177_ -= c1 * u2[i][j-1][k-2];
_t_155_ += c2 * _t_176_ * _t_177_;
_t_184_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_185_ = c2 * u1[i][j+2][k+1];
_t_185_ -= c2 * u1[i][j-2][k+1];
_t_185_ += c1 * u1[i][j+1][k+1];
_t_185_ -= c1 * u1[i][j-1][k+1];
_t_182_ = stry[j-2] * _t_184_ * _t_185_;
_t_155_ += c1 * _t_182_ * strx[i];
_t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
_t_190_ = c2 * u2[i][j+2][k+1];
_t_190_ -= c2 * u2[i][j-2][k+1];
_t_190_ += c1 * u2[i][j+1][k+1];
_t_190_ -= c1 * u2[i][j-1][k+1];
_t_155_ += c1 * _t_189_ * _t_190_;
_t_196_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_197_ = c2 * u1[i][j+2][k-1];
_t_197_ -= c2 * u1[i][j-2][k-1];
_t_197_ += c1 * u1[i][j+1][k-1];
_t_197_ -= c1 * u1[i][j-1][k-1];
_t_194_ = stry[j] * _t_196_ * _t_197_;
_t_155_ += c1 * _t_194_ * strx[i];
_t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
_t_202_ = c2 * u2[i][j+2][k-1];
_t_202_ -= c2 * u2[i][j-2][k-1];
_t_202_ += c1 * u2[i][j+1][k-1];
_t_202_ -= c1 * u2[i][j-1][k-1];
_t_155_ += c1 * _t_201_ * _t_202_;
r1ic0jc0kc0 += _t_155_;
r1[i][j][k] = r1ic0jc0kc0;
r1[i][j][k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i][j][k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
}
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
cudaMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u1;
cudaMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u2;
cudaMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u3;
cudaMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met1;
cudaMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met2;
cudaMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met3;
cudaMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met4;
cudaMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
}
|
acefc20fb8a495579e38a26f45b1a2efb55d77c1.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************************************/
// Author: Xuefeng Ding <xuefeng.ding.physics@gmail.com>
// Insitute: Gran Sasso Science Institute, L'Aquila, 67100, Italy
// Date: 2018 April 7th
// Version: v1.0
// Description: GooStats, a statistical analysis toolkit that runs on GPU.
//
// All rights reserved. 2018 copyrighted.
/*****************************************************************************/
#include "MultiVariatePdf.h"
#include "goofit/BinnedDataSet.h"
#include "goofit/Variable.h"
int MultiVariatePdf::totalPdf = 0;
MEM_CONSTANT fptype *dev_mv_k[100];
MEM_CONSTANT fptype *dev_mv_n0[100];
MEM_CONSTANT fptype *dev_mv_n1[100];
DEVICE_VECTOR<fptype> *dev_vec_mv_k[100];
DEVICE_VECTOR<fptype> *dev_vec_mv_n0[100];
DEVICE_VECTOR<fptype> *dev_vec_mv_n1[100];
MEM_CONSTANT fptype dev_mv_m0[100];
MEM_CONSTANT fptype dev_mv_m1[100];
template <MultiVariatePdf::MVLLType T>
EXEC_TARGET fptype MVLL(const fptype k, const fptype n0, const fptype n1, const fptype m0, const fptype m1);
#include "MultiVariatePdf.icc" // concrete implementation of the MultiVariate Likelihood
template <MultiVariatePdf::MVLLType T>
EXEC_TARGET fptype device_MV(fptype *evt, fptype *, unsigned int *indices) {
const fptype mv_val = evt[RO_CACHE(indices[2 + RO_CACHE(indices[0])])];
const int cIndex = RO_CACHE(indices[1]);
const fptype mv_lo = RO_CACHE(functorConstants[cIndex]);
const fptype mv_step = RO_CACHE(functorConstants[cIndex + 1]);
const int mv_bin =
(int)FLOOR((mv_val - mv_lo) / mv_step); // no problem with FLOOR: start from 0.5, which corresponse to bin=0
const int MVid = RO_CACHE(indices[2]);
const fptype m0 = RO_CACHE(dev_mv_m0[MVid]);
const fptype m1 = RO_CACHE(dev_mv_m1[MVid]);
const fptype k = RO_CACHE(dev_mv_k[MVid][mv_bin]);
const fptype n0 = RO_CACHE(dev_mv_n0[MVid][mv_bin]);
const fptype n1 = RO_CACHE(dev_mv_n1[MVid][mv_bin]);
const fptype ret = MVLL<T>(k, n0, n1, m0, m1);
//#ifdef NLL_CHECK
// printf("k %lf n0 %lf n1 %lf m0 %lf m1 %lf ret %le\n",
// fptype(k),fptype(n0),fptype(n1),m0,m1,EXP(ret));
//#endif
return -ret;
}
MEM_DEVICE device_function_ptr ptr_to_MV_StefanoDavini = device_MV<MultiVariatePdf::MVLLType::StefanoDavini>;
#include "goofit/PDFs/SumPdf.h"
const std::vector<int> MultiVariatePdf::get_pdfids(const std::vector<GooPdf *> &pdfs) {
std::vector<int> ids;
for (auto pdf : pdfs)
ids.push_back(SumPdf::registerFunc(static_cast<PdfBase *>(pdf)));
return ids;
}
const std::vector<int> MultiVariatePdf::get_Nids(const std::vector<Variable *> &rates) {
std::vector<int> ids;
for (auto rate : rates)
ids.push_back(registerParameter(rate));
return ids;
}
MultiVariatePdf::MultiVariatePdf(std::string n,
MVLLType MVLLtype,
Variable *mv_var,
BinnedDataSet *data,
const std::vector<BinnedDataSet *> &refs,
const std::vector<GooPdf *> &pdf_0_,
const std::vector<GooPdf *> &pdf_1_,
const std::vector<Variable *> &rate_0_,
const std::vector<Variable *> &rate_1_,
int startbin_,
int endbin_ /*startbin<=bin<endbin*/,
const SumPdf *sumpdf_,
double binVolume_)
: GooPdf(mv_var, n),
pdf_0(get_pdfids(pdf_0_)),
pdf_1(get_pdfids(pdf_1_)),
rate_0(get_Nids(rate_0_)),
rate_1(get_Nids(rate_1_)),
binVolume(binVolume_),
sumpdf(sumpdf_),
MVid(totalPdf++),
sum_k(-99),
I0(-99),
I1(-99),
Nbin(data->getNumBins()),
startbin(startbin_ - static_cast<int>((*(sumpdf->obsCBegin()))->lowerlimit)),
endbin(endbin_ - static_cast<int>((*(sumpdf->obsCBegin()))->lowerlimit)),
dev_iConsts(0LL) {
copyTH1DToGPU(data, refs);
std::vector<unsigned int> pindices;
pindices.push_back(registerConstants(2));
pindices.push_back(MVid /* index of the dn_histo used by this pdf*/); // 1
switch (MVLLtype) {
case MVLLType::StefanoDavini:
GET_FUNCTION_ADDR(ptr_to_MV_StefanoDavini);
break;
default:
abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " unknown MVLLtype", this);
}
initialise(pindices);
gooMalloc((void **)&dev_iConsts, 3 * sizeof(fptype));
fptype host_iConsts[3];
host_iConsts[0] = mv_var->lowerlimit;
host_iConsts[1] = (mv_var->upperlimit - mv_var->lowerlimit) / mv_var->numbins;
MEMCPY_TO_SYMBOL(
functorConstants,
host_iConsts,
2 * sizeof(fptype),
cIndex * sizeof(fptype),
hipMemcpyHostToDevice); // cIndex is a member derived from PdfBase and is set inside registerConstants method
host_iConsts[1] = mv_var->upperlimit;
host_iConsts[2] = mv_var->numbins;
MEMCPY(dev_iConsts, host_iConsts, 3 * sizeof(fptype), hipMemcpyHostToDevice);
}
void MultiVariatePdf::copyTH1DToGPU(BinnedDataSet *data, const std::vector<BinnedDataSet *> &refs) {
fptype *dev_address[1];
copyTH1DToGPU(data, sum_k, dev_address, dev_vec_mv_k[MVid]);
MEMCPY_TO_SYMBOL(dev_mv_k, dev_address, sizeof(fptype *), MVid * sizeof(fptype *), hipMemcpyHostToDevice);
copyTH1DToGPU(refs.at(0), I0, dev_address, dev_vec_mv_n0[MVid]);
MEMCPY_TO_SYMBOL(dev_mv_n0, dev_address, sizeof(fptype *), MVid * sizeof(fptype *), hipMemcpyHostToDevice);
copyTH1DToGPU(refs.at(1), I1, dev_address, dev_vec_mv_n1[MVid]);
MEMCPY_TO_SYMBOL(dev_mv_n1, dev_address, sizeof(fptype *), MVid * sizeof(fptype *), hipMemcpyHostToDevice);
}
void MultiVariatePdf::copyTH1DToGPU(BinnedDataSet *data,
fptype &sum,
fptype *dev_address[1],
DEVICE_VECTOR<fptype> *&dev_vec_address) {
thrust::host_vector<fptype> host_histogram;
unsigned int numbins = data->getNumBins();
sum = 0;
for (unsigned int i = 0; i < numbins; ++i) {
fptype curr = data->getBinContent(i);
sum += curr;
host_histogram.push_back(curr); // warning: you should normalize the histogram yourself.
}
dev_vec_address = new DEVICE_VECTOR<fptype>(host_histogram);
dev_address[0] = thrust::raw_pointer_cast(dev_vec_address->data());
}
__host__ fptype MultiVariatePdf::normalise() const {
sumpdf->normalise();
host_normalisation[parameters] = 1.0;
return 1;
}
__host__ double MultiVariatePdf::calculateNLL() const {
if (IsChisquareFit())
return 0;
return GooPdf::calculateNLL();
}
__host__ fptype MultiVariatePdf::sumOfNll(int __attribute__((__unused__)) numVars) const {
static fptype logL;
//if (sumpdf->updated()) {
calculate_m0m1();
static thrust::plus<fptype> cudaPlus;
thrust::counting_iterator<int> binIndex(0);
thrust::constant_iterator<fptype *> startendstep(dev_iConsts); // 3*fptype lo, hi and step for npe
thrust::constant_iterator<int> eventSize(1); // 1: only npe
fptype dummy = 0;
BinnedMetricTaker modalor(const_cast<MultiVariatePdf *>(this), getMetricPointer("ptr_to_Eval"));
logL =
thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, startendstep)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + Nbin, eventSize, startendstep)),
modalor,
dummy,
cudaPlus);
//}
#ifdef NLL_CHECK
DEVICE_VECTOR<fptype> dev_logLs(Nbin);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, startendstep)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + Nbin, eventSize, startendstep)),
dev_logLs.begin(),
modalor);
thrust::host_vector<fptype> logLs(dev_logLs);
thrust::host_vector<fptype> k(*dev_vec_mv_k[MVid]);
thrust::host_vector<fptype> n0(*dev_vec_mv_n0[MVid]);
thrust::host_vector<fptype> n1(*dev_vec_mv_n1[MVid]);
double sum = 0;
const Variable *mv_var = *obsCBegin();
double lo = mv_var->lowerlimit;
double de = (mv_var->upperlimit - lo) / mv_var->numbins;
fptype m0, m1;
MEMCPY_FROM_SYMBOL(&m0, dev_mv_m0, sizeof(fptype), MVid * sizeof(fptype), hipMemcpyDeviceToHost);
MEMCPY_FROM_SYMBOL(&m1, dev_mv_m1, sizeof(fptype), MVid * sizeof(fptype), hipMemcpyDeviceToHost);
for (unsigned int i = 0; i < logLs.size(); ++i) {
sum += logLs[i];
printf("log(L)MV %.15le e %lf user %lf\n k %.2lf n0 %.2lf n1 %.2lf m0 %.10le m1 %.10le L %.15le\n",
sum,
(startbin + endbin) / 2. + static_cast<int>((*(sumpdf->obsCBegin()))->lowerlimit),
(i + 0.5) * de + lo,
k[i],
n0[i],
n1[i],
m0,
m1,
logLs[i]);
}
#endif
return logL;
}
void MultiVariatePdf::calculate_m0m1() const {
fptype N0 = 0, N1 = 0;
auto rateIdit = rate_0.begin();
for (auto pdfId : pdf_0) {
const double scale = sumpdf->Norm() * binVolume * host_params[*rateIdit];
N0 += thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
scale;
#ifdef MV_CHECK
thrust::host_vector<fptype> values(*componentWorkSpace[pdfId]);
double sum = 0;
int i = static_cast<int>((*(sumpdf->obsBegin()))->lowerlimit);
for (auto value : values) {
sum += value;
printf("N%d [%d](%d)<%lf>-><%lf> <%lf>\n", 0, pdfId, i, i + 0.5, value * scale, sum * scale);
++i;
}
printf("N0 %lf [%d] a(%d)-b(%d)->(%lf) tot 0-%d (%lf)\n",
N0,
pdfId,
startbin,
endbin,
thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
scale,
componentWorkSpace[pdfId]->size(),
thrust::reduce(componentWorkSpace[pdfId]->begin(), componentWorkSpace[pdfId]->end()) * scale);
#endif
++rateIdit;
}
rateIdit = rate_1.begin();
for (auto pdfId : pdf_1) {
const double scale = sumpdf->Norm() * binVolume * host_params[*rateIdit];
N1 += thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
scale;
#ifdef MV_CHECK
thrust::host_vector<fptype> values(*componentWorkSpace[pdfId]);
double sum = 0;
int i = static_cast<int>((*(sumpdf->obsBegin()))->lowerlimit);
for (auto value : values) {
sum += value;
printf("N%d [%d](%d)<%lf>-><%lf> <%lf>\n", 1, pdfId, i, i + 0.5, value * scale, sum * scale);
++i;
}
printf("N1 %lf [%d] a(%d)-b(%d)->(%lf) tot 0-%d (%lf)\n",
N1,
pdfId,
startbin,
endbin,
thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
scale,
componentWorkSpace[pdfId]->size(),
thrust::reduce(componentWorkSpace[pdfId]->begin(), componentWorkSpace[pdfId]->end()) * scale);
#endif
++rateIdit;
}
fptype host_m0 = N0 / (N0 + N1) * sum_k / (I0 + Nbin);
fptype host_m1 = N1 / (N0 + N1) * sum_k / (I1 + Nbin);
#ifdef NLL_CHECK
printf("n %d i %d Ni %.15le Nsum %.15le sum_k %.1lf Ii %.1lf Nbin %d\n", 2, 0, N0, N0 + N1, sum_k, I0, Nbin);
printf("n %d i %d Ni %.15le Nsum %.15le sum_k %.1lf Ii %.1lf Nbin %d\n", 2, 1, N1, N0 + N1, sum_k, I1, Nbin);
#endif
MEMCPY_TO_SYMBOL(dev_mv_m0, &host_m0, sizeof(fptype), MVid * sizeof(fptype), hipMemcpyHostToDevice);
MEMCPY_TO_SYMBOL(dev_mv_m1, &host_m1, sizeof(fptype), MVid * sizeof(fptype), hipMemcpyHostToDevice);
}
| acefc20fb8a495579e38a26f45b1a2efb55d77c1.cu | /*****************************************************************************/
// Author: Xuefeng Ding <xuefeng.ding.physics@gmail.com>
// Insitute: Gran Sasso Science Institute, L'Aquila, 67100, Italy
// Date: 2018 April 7th
// Version: v1.0
// Description: GooStats, a statistical analysis toolkit that runs on GPU.
//
// All rights reserved. 2018 copyrighted.
/*****************************************************************************/
#include "MultiVariatePdf.h"
#include "goofit/BinnedDataSet.h"
#include "goofit/Variable.h"
int MultiVariatePdf::totalPdf = 0;
MEM_CONSTANT fptype *dev_mv_k[100];
MEM_CONSTANT fptype *dev_mv_n0[100];
MEM_CONSTANT fptype *dev_mv_n1[100];
DEVICE_VECTOR<fptype> *dev_vec_mv_k[100];
DEVICE_VECTOR<fptype> *dev_vec_mv_n0[100];
DEVICE_VECTOR<fptype> *dev_vec_mv_n1[100];
MEM_CONSTANT fptype dev_mv_m0[100];
MEM_CONSTANT fptype dev_mv_m1[100];
template <MultiVariatePdf::MVLLType T>
EXEC_TARGET fptype MVLL(const fptype k, const fptype n0, const fptype n1, const fptype m0, const fptype m1);
#include "MultiVariatePdf.icc" // concrete implementation of the MultiVariate Likelihood
template <MultiVariatePdf::MVLLType T>
EXEC_TARGET fptype device_MV(fptype *evt, fptype *, unsigned int *indices) {
const fptype mv_val = evt[RO_CACHE(indices[2 + RO_CACHE(indices[0])])];
const int cIndex = RO_CACHE(indices[1]);
const fptype mv_lo = RO_CACHE(functorConstants[cIndex]);
const fptype mv_step = RO_CACHE(functorConstants[cIndex + 1]);
const int mv_bin =
(int)FLOOR((mv_val - mv_lo) / mv_step); // no problem with FLOOR: start from 0.5, which corresponse to bin=0
const int MVid = RO_CACHE(indices[2]);
const fptype m0 = RO_CACHE(dev_mv_m0[MVid]);
const fptype m1 = RO_CACHE(dev_mv_m1[MVid]);
const fptype k = RO_CACHE(dev_mv_k[MVid][mv_bin]);
const fptype n0 = RO_CACHE(dev_mv_n0[MVid][mv_bin]);
const fptype n1 = RO_CACHE(dev_mv_n1[MVid][mv_bin]);
const fptype ret = MVLL<T>(k, n0, n1, m0, m1);
//#ifdef NLL_CHECK
// printf("k %lf n0 %lf n1 %lf m0 %lf m1 %lf ret %le\n",
// fptype(k),fptype(n0),fptype(n1),m0,m1,EXP(ret));
//#endif
return -ret;
}
MEM_DEVICE device_function_ptr ptr_to_MV_StefanoDavini = device_MV<MultiVariatePdf::MVLLType::StefanoDavini>;
#include "goofit/PDFs/SumPdf.h"
const std::vector<int> MultiVariatePdf::get_pdfids(const std::vector<GooPdf *> &pdfs) {
std::vector<int> ids;
for (auto pdf : pdfs)
ids.push_back(SumPdf::registerFunc(static_cast<PdfBase *>(pdf)));
return ids;
}
const std::vector<int> MultiVariatePdf::get_Nids(const std::vector<Variable *> &rates) {
std::vector<int> ids;
for (auto rate : rates)
ids.push_back(registerParameter(rate));
return ids;
}
MultiVariatePdf::MultiVariatePdf(std::string n,
MVLLType MVLLtype,
Variable *mv_var,
BinnedDataSet *data,
const std::vector<BinnedDataSet *> &refs,
const std::vector<GooPdf *> &pdf_0_,
const std::vector<GooPdf *> &pdf_1_,
const std::vector<Variable *> &rate_0_,
const std::vector<Variable *> &rate_1_,
int startbin_,
int endbin_ /*startbin<=bin<endbin*/,
const SumPdf *sumpdf_,
double binVolume_)
: GooPdf(mv_var, n),
pdf_0(get_pdfids(pdf_0_)),
pdf_1(get_pdfids(pdf_1_)),
rate_0(get_Nids(rate_0_)),
rate_1(get_Nids(rate_1_)),
binVolume(binVolume_),
sumpdf(sumpdf_),
MVid(totalPdf++),
sum_k(-99),
I0(-99),
I1(-99),
Nbin(data->getNumBins()),
startbin(startbin_ - static_cast<int>((*(sumpdf->obsCBegin()))->lowerlimit)),
endbin(endbin_ - static_cast<int>((*(sumpdf->obsCBegin()))->lowerlimit)),
dev_iConsts(0LL) {
copyTH1DToGPU(data, refs);
std::vector<unsigned int> pindices;
pindices.push_back(registerConstants(2));
pindices.push_back(MVid /* index of the dn_histo used by this pdf*/); // 1
switch (MVLLtype) {
case MVLLType::StefanoDavini:
GET_FUNCTION_ADDR(ptr_to_MV_StefanoDavini);
break;
default:
abortWithCudaPrintFlush(__FILE__, __LINE__, getName() + " unknown MVLLtype", this);
}
initialise(pindices);
gooMalloc((void **)&dev_iConsts, 3 * sizeof(fptype));
fptype host_iConsts[3];
host_iConsts[0] = mv_var->lowerlimit;
host_iConsts[1] = (mv_var->upperlimit - mv_var->lowerlimit) / mv_var->numbins;
MEMCPY_TO_SYMBOL(
functorConstants,
host_iConsts,
2 * sizeof(fptype),
cIndex * sizeof(fptype),
cudaMemcpyHostToDevice); // cIndex is a member derived from PdfBase and is set inside registerConstants method
host_iConsts[1] = mv_var->upperlimit;
host_iConsts[2] = mv_var->numbins;
MEMCPY(dev_iConsts, host_iConsts, 3 * sizeof(fptype), cudaMemcpyHostToDevice);
}
void MultiVariatePdf::copyTH1DToGPU(BinnedDataSet *data, const std::vector<BinnedDataSet *> &refs) {
fptype *dev_address[1];
copyTH1DToGPU(data, sum_k, dev_address, dev_vec_mv_k[MVid]);
MEMCPY_TO_SYMBOL(dev_mv_k, dev_address, sizeof(fptype *), MVid * sizeof(fptype *), cudaMemcpyHostToDevice);
copyTH1DToGPU(refs.at(0), I0, dev_address, dev_vec_mv_n0[MVid]);
MEMCPY_TO_SYMBOL(dev_mv_n0, dev_address, sizeof(fptype *), MVid * sizeof(fptype *), cudaMemcpyHostToDevice);
copyTH1DToGPU(refs.at(1), I1, dev_address, dev_vec_mv_n1[MVid]);
MEMCPY_TO_SYMBOL(dev_mv_n1, dev_address, sizeof(fptype *), MVid * sizeof(fptype *), cudaMemcpyHostToDevice);
}
// Copies the bin contents of 'data' into a freshly allocated device vector.
// On return: 'sum' holds the total of all bin contents, 'dev_vec_address'
// owns the newly allocated device vector, and dev_address[0] is its raw
// device pointer. The histogram is uploaded as-is; normalising it (if
// desired) is the caller's responsibility.
void MultiVariatePdf::copyTH1DToGPU(BinnedDataSet *data,
                                    fptype &sum,
                                    fptype *dev_address[1],
                                    DEVICE_VECTOR<fptype> *&dev_vec_address) {
    const unsigned int nBins = data->getNumBins();
    thrust::host_vector<fptype> bins;
    bins.reserve(nBins);
    sum = 0;
    for(unsigned int bin = 0; bin < nBins; ++bin) {
        const fptype content = data->getBinContent(bin);
        sum += content;
        bins.push_back(content); // warning: you should normalize the histogram yourself.
    }
    dev_vec_address = new DEVICE_VECTOR<fptype>(bins);
    dev_address[0]  = thrust::raw_pointer_cast(dev_vec_address->data());
}
__host__ fptype MultiVariatePdf::normalise() const {
    // Normalisation is delegated to the wrapped sum pdf; this pdf itself
    // reports a unit normalisation (its metric works on raw bin contents).
    sumpdf->normalise();
    host_normalisation[parameters] = 1.0;
    return 1;
}
// Returns 0 for chi-square fits; otherwise defers to the standard
// GooPdf negative-log-likelihood computation.
__host__ double MultiVariatePdf::calculateNLL() const {
    return IsChisquareFit() ? 0.0 : GooPdf::calculateNLL();
}
__host__ fptype MultiVariatePdf::sumOfNll(int __attribute__((__unused__)) numVars) const {
    // Sums the per-bin multivariate log-likelihood contributions over all
    // Nbin bins using a BinnedMetricTaker with the "ptr_to_Eval" metric.
    // 'static' keeps the last value so the (currently disabled) caching
    // branch below could skip recomputation when sumpdf is unchanged.
    static fptype logL;
    //if (sumpdf->updated()) {
    // Refresh the m0/m1 scale factors consumed by the device-side metric.
    calculate_m0m1();
    static thrust::plus<fptype> cudaPlus;
    thrust::counting_iterator<int> binIndex(0);
    thrust::constant_iterator<fptype *> startendstep(dev_iConsts); // 3*fptype lo, hi and step for npe
    thrust::constant_iterator<int> eventSize(1);                   // 1: only npe
    fptype dummy = 0; // initial value for the reduction
    BinnedMetricTaker modalor(const_cast<MultiVariatePdf *>(this), getMetricPointer("ptr_to_Eval"));
    logL =
        thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, startendstep)),
                                 thrust::make_zip_iterator(thrust::make_tuple(binIndex + Nbin, eventSize, startendstep)),
                                 modalor,
                                 dummy,
                                 cudaPlus);
    //}
#ifdef NLL_CHECK
    // Debug path: recompute the per-bin values individually and dump them,
    // together with the histograms and the m0/m1 factors, for inspection.
    DEVICE_VECTOR<fptype> dev_logLs(Nbin);
    thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, startendstep)),
                      thrust::make_zip_iterator(thrust::make_tuple(binIndex + Nbin, eventSize, startendstep)),
                      dev_logLs.begin(),
                      modalor);
    thrust::host_vector<fptype> logLs(dev_logLs);
    thrust::host_vector<fptype> k(*dev_vec_mv_k[MVid]);
    thrust::host_vector<fptype> n0(*dev_vec_mv_n0[MVid]);
    thrust::host_vector<fptype> n1(*dev_vec_mv_n1[MVid]);
    double sum = 0;
    const Variable *mv_var = *obsCBegin();
    double lo = mv_var->lowerlimit;
    double de = (mv_var->upperlimit - lo) / mv_var->numbins; // bin width of the MV observable
    fptype m0, m1;
    MEMCPY_FROM_SYMBOL(&m0, dev_mv_m0, sizeof(fptype), MVid * sizeof(fptype), cudaMemcpyDeviceToHost);
    MEMCPY_FROM_SYMBOL(&m1, dev_mv_m1, sizeof(fptype), MVid * sizeof(fptype), cudaMemcpyDeviceToHost);
    for (unsigned int i = 0; i < logLs.size(); ++i) {
        sum += logLs[i];
        printf("log(L)MV %.15le e %lf user %lf\n k %.2lf n0 %.2lf n1 %.2lf m0 %.10le m1 %.10le L %.15le\n",
               sum,
               (startbin + endbin) / 2. + static_cast<int>((*(sumpdf->obsCBegin()))->lowerlimit),
               (i + 0.5) * de + lo,
               k[i],
               n0[i],
               n1[i],
               m0,
               m1,
               logLs[i]);
    }
#endif
    return logL;
}
void MultiVariatePdf::calculate_m0m1() const {
    // Computes the expected yields N0/N1 of the two pdf groups over the bin
    // window [startbin, endbin), converts them into the m0/m1 factors, and
    // uploads those to the per-MVid slots of the device symbols
    // dev_mv_m0/dev_mv_m1 used by the device-side evaluation.
    fptype N0 = 0, N1 = 0;
    auto rateIdit = rate_0.begin(); // rate parameter index paired with each pdf in pdf_0
    for (auto pdfId : pdf_0) {
        // Yield of this component: integral of its workspace over the bin
        // window, scaled by the sum-pdf norm, the bin volume and its rate.
        const double scale = sumpdf->Norm() * binVolume * host_params[*rateIdit];
        N0 += thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
              scale;
#ifdef MV_CHECK
        // Debug dump of the per-bin component values and running integral.
        thrust::host_vector<fptype> values(*componentWorkSpace[pdfId]);
        double sum = 0;
        int i = static_cast<int>((*(sumpdf->obsBegin()))->lowerlimit);
        for (auto value : values) {
            sum += value;
            printf("N%d [%d](%d)<%lf>-><%lf> <%lf>\n", 0, pdfId, i, i + 0.5, value * scale, sum * scale);
            ++i;
        }
        printf("N0 %lf [%d] a(%d)-b(%d)->(%lf) tot 0-%d (%lf)\n",
               N0,
               pdfId,
               startbin,
               endbin,
               thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
                   scale,
               componentWorkSpace[pdfId]->size(),
               thrust::reduce(componentWorkSpace[pdfId]->begin(), componentWorkSpace[pdfId]->end()) * scale);
#endif
        ++rateIdit;
    }
    // Same accumulation for the second group of pdfs.
    rateIdit = rate_1.begin();
    for (auto pdfId : pdf_1) {
        const double scale = sumpdf->Norm() * binVolume * host_params[*rateIdit];
        N1 += thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
              scale;
#ifdef MV_CHECK
        thrust::host_vector<fptype> values(*componentWorkSpace[pdfId]);
        double sum = 0;
        int i = static_cast<int>((*(sumpdf->obsBegin()))->lowerlimit);
        for (auto value : values) {
            sum += value;
            printf("N%d [%d](%d)<%lf>-><%lf> <%lf>\n", 1, pdfId, i, i + 0.5, value * scale, sum * scale);
            ++i;
        }
        printf("N1 %lf [%d] a(%d)-b(%d)->(%lf) tot 0-%d (%lf)\n",
               N1,
               pdfId,
               startbin,
               endbin,
               thrust::reduce(componentWorkSpace[pdfId]->begin() + startbin, componentWorkSpace[pdfId]->begin() + endbin) *
                   scale,
               componentWorkSpace[pdfId]->size(),
               thrust::reduce(componentWorkSpace[pdfId]->begin(), componentWorkSpace[pdfId]->end()) * scale);
#endif
        ++rateIdit;
    }
    // m_i = (fraction of group i) * sum_k / (I_i + Nbin).
    // NOTE(review): the "+ Nbin" in the denominator looks like a smoothing /
    // offset term paired with the reference integrals I0/I1 — confirm against
    // the device-side formula that consumes dev_mv_m0/dev_mv_m1.
    fptype host_m0 = N0 / (N0 + N1) * sum_k / (I0 + Nbin);
    fptype host_m1 = N1 / (N0 + N1) * sum_k / (I1 + Nbin);
#ifdef NLL_CHECK
    printf("n %d i %d Ni %.15le Nsum %.15le sum_k %.1lf Ii %.1lf Nbin %d\n", 2, 0, N0, N0 + N1, sum_k, I0, Nbin);
    printf("n %d i %d Ni %.15le Nsum %.15le sum_k %.1lf Ii %.1lf Nbin %d\n", 2, 1, N1, N0 + N1, sum_k, I1, Nbin);
#endif
    MEMCPY_TO_SYMBOL(dev_mv_m0, &host_m0, sizeof(fptype), MVid * sizeof(fptype), cudaMemcpyHostToDevice);
    MEMCPY_TO_SYMBOL(dev_mv_m1, &host_m1, sizeof(fptype), MVid * sizeof(fptype), cudaMemcpyHostToDevice);
}
|
a7f45082458bf2151dd13a050275a29c6d14d0cf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <common/device_buffer.hpp>
#include <distance/epsilon_neighborhood.cuh>
#include <random/make_blobs.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
// Parameters for one epsilon-neighborhood test case.
template <typename T, typename IdxT>
struct EpsInputs {
  IdxT n_row, n_col, n_centers, n_batches;  // dataset shape, blob count, query batching
  T eps;                                    // neighborhood radius
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) {
return os;
}
// Fixture: builds a synthetic blob dataset on the device and the output
// buffers (adjacency + neighbor counts) for one batch of query rows.
template <typename T, typename IdxT>
class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> {
 protected:
  void SetUp() override {
    param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam();
    CUDA_CHECK(hipStreamCreate(&stream));
    allocate(data, param.n_row * param.n_col);
    allocate(labels, param.n_row);
    batchSize = param.n_row / param.n_batches;  // query rows per batch
    allocate(adj, param.n_row * batchSize);     // adjacency for one batch
    allocate(vd, batchSize + 1, true);          // per-query counts (+1 extra slot), zero-initialized
    allocator.reset(new defaultDeviceAllocator);
    // cluster std-dev 0.01 is tiny relative to eps, so each point's
    // eps-neighborhood is exactly its own cluster (asserted in the test).
    Random::make_blobs<T, IdxT>(data, labels, param.n_row, param.n_col,
                                param.n_centers, allocator, stream, true,
                                nullptr, nullptr, T(0.01), false);
  }
  // Drains the stream, then releases it and every device allocation.
  void TearDown() override {
    CUDA_CHECK(hipStreamSynchronize(stream));
    CUDA_CHECK(hipStreamDestroy(stream));
    CUDA_CHECK(hipFree(data));
    CUDA_CHECK(hipFree(labels));
    CUDA_CHECK(hipFree(adj));
    CUDA_CHECK(hipFree(vd));
  }
  EpsInputs<T, IdxT> param;
  hipStream_t stream;
  T* data;            // device: n_row x n_col points
  bool* adj;          // device: n_row x batchSize adjacency matrix
  IdxT *labels, *vd;  // device: blob labels; neighbor counts per query
  IdxT batchSize;
  std::shared_ptr<deviceAllocator> allocator;
};  // class EpsNeighTest
// Test cases: {n_row, n_col, n_centers, n_batches, eps}. Column counts of
// 16/17/18/32 presumably exercise different tile widths/alignments (TODO
// confirm against the kernel's tiling); the last two use a 10k-dim input,
// without and with batching.
const std::vector<EpsInputs<float, int>> inputsfi = {
  {15000, 16, 5, 1, 2.f},     {14000, 16, 5, 1, 2.f},
  {15000, 17, 5, 1, 2.f},     {14000, 17, 5, 1, 2.f},
  {15000, 18, 5, 1, 2.f},     {14000, 18, 5, 1, 2.f},
  {15000, 32, 5, 1, 2.f},     {14000, 32, 5, 1, 2.f},
  {20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f},
};
typedef EpsNeighTest<float, int> EpsNeighTestFI;
// For each batch: clear the outputs, compute the eps-neighborhood of the
// batch's query rows against the full dataset, and check every query has
// exactly n_row / n_centers neighbors — i.e. precisely its own cluster,
// which holds because the blobs are tight relative to eps.
TEST_P(EpsNeighTestFI, Result) {
  for (int i = 0; i < param.n_batches; ++i) {
    CUDA_CHECK(
      hipMemsetAsync(adj, 0, sizeof(bool) * param.n_row * batchSize, stream));
    CUDA_CHECK(hipMemsetAsync(vd, 0, sizeof(int) * (batchSize + 1), stream));
    // Note: eps is passed squared (kernel compares squared L2 distances).
    epsUnexpL2SqNeighborhood<float, int>(
      adj, vd, data, data + (i * batchSize * param.n_col), param.n_row,
      batchSize, param.n_col, param.eps * param.eps, stream);
    ASSERT_TRUE(devArrMatch(param.n_row / param.n_centers, vd, batchSize,
                            Compare<int>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI,
                        ::testing::ValuesIn(inputsfi));
}; // namespace Distance
}; // namespace MLCommon
| a7f45082458bf2151dd13a050275a29c6d14d0cf.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <common/device_buffer.hpp>
#include <distance/epsilon_neighborhood.cuh>
#include <random/make_blobs.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
// Parameters for one epsilon-neighborhood test case.
template <typename T, typename IdxT>
struct EpsInputs {
  IdxT n_row, n_col, n_centers, n_batches;  // dataset shape, blob count, query batching
  T eps;                                    // neighborhood radius
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) {
return os;
}
// Fixture: builds a synthetic blob dataset on the device and the output
// buffers (adjacency + neighbor counts) for one batch of query rows.
template <typename T, typename IdxT>
class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> {
 protected:
  void SetUp() override {
    param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam();
    CUDA_CHECK(cudaStreamCreate(&stream));
    allocate(data, param.n_row * param.n_col);
    allocate(labels, param.n_row);
    batchSize = param.n_row / param.n_batches;  // query rows per batch
    allocate(adj, param.n_row * batchSize);     // adjacency for one batch
    allocate(vd, batchSize + 1, true);          // per-query counts (+1 extra slot), zero-initialized
    allocator.reset(new defaultDeviceAllocator);
    // cluster std-dev 0.01 is tiny relative to eps, so each point's
    // eps-neighborhood is exactly its own cluster (asserted in the test).
    Random::make_blobs<T, IdxT>(data, labels, param.n_row, param.n_col,
                                param.n_centers, allocator, stream, true,
                                nullptr, nullptr, T(0.01), false);
  }
  // Drains the stream, then releases it and every device allocation.
  void TearDown() override {
    CUDA_CHECK(cudaStreamSynchronize(stream));
    CUDA_CHECK(cudaStreamDestroy(stream));
    CUDA_CHECK(cudaFree(data));
    CUDA_CHECK(cudaFree(labels));
    CUDA_CHECK(cudaFree(adj));
    CUDA_CHECK(cudaFree(vd));
  }
  EpsInputs<T, IdxT> param;
  cudaStream_t stream;
  T* data;            // device: n_row x n_col points
  bool* adj;          // device: n_row x batchSize adjacency matrix
  IdxT *labels, *vd;  // device: blob labels; neighbor counts per query
  IdxT batchSize;
  std::shared_ptr<deviceAllocator> allocator;
};  // class EpsNeighTest
// Test cases: {n_row, n_col, n_centers, n_batches, eps}. Column counts of
// 16/17/18/32 presumably exercise different tile widths/alignments (TODO
// confirm against the kernel's tiling); the last two use a 10k-dim input,
// without and with batching.
const std::vector<EpsInputs<float, int>> inputsfi = {
  {15000, 16, 5, 1, 2.f},     {14000, 16, 5, 1, 2.f},
  {15000, 17, 5, 1, 2.f},     {14000, 17, 5, 1, 2.f},
  {15000, 18, 5, 1, 2.f},     {14000, 18, 5, 1, 2.f},
  {15000, 32, 5, 1, 2.f},     {14000, 32, 5, 1, 2.f},
  {20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f},
};
typedef EpsNeighTest<float, int> EpsNeighTestFI;
// For each batch: clear the outputs, compute the eps-neighborhood of the
// batch's query rows against the full dataset, and check every query has
// exactly n_row / n_centers neighbors — i.e. precisely its own cluster,
// which holds because the blobs are tight relative to eps.
TEST_P(EpsNeighTestFI, Result) {
  for (int i = 0; i < param.n_batches; ++i) {
    CUDA_CHECK(
      cudaMemsetAsync(adj, 0, sizeof(bool) * param.n_row * batchSize, stream));
    CUDA_CHECK(cudaMemsetAsync(vd, 0, sizeof(int) * (batchSize + 1), stream));
    // Note: eps is passed squared (kernel compares squared L2 distances).
    epsUnexpL2SqNeighborhood<float, int>(
      adj, vd, data, data + (i * batchSize * param.n_col), param.n_row,
      batchSize, param.n_col, param.eps * param.eps, stream);
    ASSERT_TRUE(devArrMatch(param.n_row / param.n_centers, vd, batchSize,
                            Compare<int>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI,
                        ::testing::ValuesIn(inputsfi));
}; // namespace Distance
}; // namespace MLCommon
|
32b2f4b93631a0c50ad4794d9211424040b5e549.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013 Johns Hopkins University (author: Guoguo Chen)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
  // Tree-based block-wide sum of buffer[0..blockDim.x); the result ends up
  // in buffer[0] and is returned. Contains __syncthreads(), so every thread
  // of the block must reach this call.
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (sum)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1);	// divide by two
    // In this variant the *upper* half of threads is active: each adds its
    // own element into the slot halfPoint below it. The "was ..." notes
    // record the change from the conventional lower-half indexing; the
    // ceil-divide halfPoint keeps it correct for non-power-of-two sizes.
    // only the first half of the threads will be active.
    if (threadIdx.x >= halfPoint)  { // was <
      // Get the shared value stored by another thread
      Real temp = 0.0;
      if(threadIdx.x < nTotalThreads) { // was +halfPoint
        temp = buffer[threadIdx.x]; // was +halfPoint
      }
      buffer[threadIdx.x - halfPoint] += temp;
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);	// divide by two.
  }
  // the result
  return buffer[0];
}
template<typename Real>
__device__
static Real _min_reduce(Real buffer[]) {
  // Tree-based block-wide minimum of buffer[0..blockDim.x); the result ends
  // up in buffer[0] and is returned. Contains __syncthreads(), so every
  // thread of the block must reach this call.
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (min)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1);	// divide by two
    // only the first half of the threads will be active
    if (threadIdx.x < halfPoint) {
      // Bounds check handles an odd-sized active range (ceil-divide halfPoint).
      if (threadIdx.x + halfPoint < nTotalThreads) {
        Real temp = buffer[threadIdx.x + halfPoint];
        if (temp < buffer[threadIdx.x])
          buffer[threadIdx.x] = temp;
      }
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);	// divide by two
  }
  // the result
  return buffer[0];
}
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
  // Tree-based block-wide maximum of buffer[0..blockDim.x); the result ends
  // up in buffer[0] and is returned. Contains __syncthreads(), so every
  // thread of the block must reach this call.
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (max)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1);	// divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      // (bounds check handles an odd-sized active range).
      if(threadIdx.x+halfPoint < nTotalThreads)  {
        Real temp = buffer[threadIdx.x + halfPoint];
        if (temp > buffer[threadIdx.x])
           buffer[threadIdx.x] = temp;
      }
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);	// divide by two.
  }
  // the result
  return buffer[0];
}
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
  // Argmax reduction: idx[] holds candidate indices into val[]; the index of
  // the largest val entry ends up in idx[0] and is returned. Contains
  // __syncthreads(), so every thread of the block must reach this call.
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (get index of maximum)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1);	// divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      // -1e20 acts as "no candidate" for lanes past the active range.
      // NOTE(review): values below -1e20 would lose to this sentinel —
      // assumed not to occur in practice.
      Real temp = -1e20;
      if(threadIdx.x+halfPoint < nTotalThreads) {
        temp = val[idx[threadIdx.x + halfPoint]];
      }
      if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);	// divide by two.
  }
  // the result
  return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
// Mirrors the strictly-lower triangle of the square matrix A onto its upper
// triangle: A(c, r) = A(r, c) for r > c, making A symmetric.
template<typename Real>
__global__
static void _copy_low_upp(Real* A, MatrixDim dimA) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dimA.rows || r <= c) return; // act only strictly below the diagonal
  A[c * dimA.stride + r] = A[r * dimA.stride + c];
}
// Mirrors the strictly-upper triangle of the square matrix A onto its lower
// triangle: A(c, r) = A(r, c) for c > r, making A symmetric.
template<typename Real>
__global__
static void _copy_upp_low(Real* A, MatrixDim dimA) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= dimA.rows || c <= r) return; // act only strictly above the diagonal
  A[c * dimA.stride + r] = A[r * dimA.stride + c];
}
// mat += diag(vec) * mat2.
// mat = beta * mat + alpha * diag(vec) * mat2, i.e. row r of mat2 is scaled
// by alpha * vec[r] and blended into mat. mat2 is addressed through explicit
// row/column strides, so a transposed view can be passed.
// Here x is the column index and y the row index.
template<typename Real>
__global__
static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim,
                              const Real *vec, const Real *mat2, int mat2_row_stride,
                              int mat2_col_stride, Real beta) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= mat_dim.rows || col >= mat_dim.cols) return;
  Real src  = mat2[row * mat2_row_stride + col * mat2_col_stride];
  Real *dst = mat + row * mat_dim.stride + col;
  *dst = alpha * vec[row] * src + beta * (*dst);
}
// Expands the packed lower-triangular matrix B into the full matrix A:
// the lower triangle (col <= row) is copied (with type conversion), the
// strictly-upper triangle is zeroed.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= dmat.rows || col >= dmat.cols) return;
  int32_cuda dst = row * dmat.stride + col;
  if (col <= row) {
    A[dst] = static_cast<Real>(B[(row * (row + 1) / 2) + col]); // packed row-major lower triangle
  } else {
    A[dst] = 0.0;
  }
}
// Expands the packed lower-triangular matrix B, transposed, into the full
// matrix A: A(row, col) = B(col, row) for row <= col, zero below the diagonal.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= dmat.rows || col >= dmat.cols) return;
  int32_cuda dst = row * dmat.stride + col;
  if (row <= col) {
    // Swapped indices into the packed storage give the transpose.
    A[dst] = static_cast<Real>(B[(col * (col + 1) / 2) + row]);
  } else {
    A[dst] = 0.0;
  }
}
// for this kernel, following the newer pattern, the x-dim is the row-index, the
// y-dim is the col-index.
// Element-wise copy (with type conversion) from mat_in to mat_out.
// Following the newer convention, x is the row index and y the column index.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= d_out.rows || col >= d_out.cols) return;
  mat_out[col + row * d_out.stride] = static_cast<Real>(mat_in[col + row * d_in.stride]);
}
// for this kernel, the x-dim is the row-index at the output, the y-dim is the
// col-index at the output
// Transposing copy (with type conversion): mat_out(row, col) = mat_in(col, row).
// x is the row index *of the output*, y its column index.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= d_out.rows || col >= d_out.cols) return;
  mat_out[col + row * d_out.stride] = static_cast<Real>(mat_in[row + col * d_in.stride]);
}
// In-place transpose of a square matrix: each thread strictly below the
// diagonal swaps its element with the mirrored one; other threads do nothing.
template<typename Real>
__global__
static void _transpose_matrix(Real* mat, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  if (i >= d.rows || j >= i) { return; } // only half the threads act
  Real *p = mat + j + i * d.stride;
  Real *q = mat + i + j * d.stride;
  Real tmp = *p;
  *p = *q;
  *q = tmp;
}
// Writes vector v into column 'col' of mat: mat(i, col) = v[i].
template<typename Real>
__global__
static void _copy_col_from_vec(Real* mat, const Real* v, int col, MatrixDim d) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= d.rows) return;
  mat[col + row * d.stride] = v[row];
}
// Element-wise in-place exponential: mat[i][j] = exp(mat[i][j]).
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int32_cuda idx = col + row * d.stride;
  mat[idx] = exp(mat[idx]);
}
// Scales the diagonal of a packed lower-triangular matrix by 'value'.
// ((i+1)*(i+2)/2 - 1) is the linear index of diagonal element i in
// row-major packed storage (last element of packed row i).
template<typename Real>
__global__
static void _scale_diag(Real* mat, Real value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim) return;
  int32_cuda diag = ((i + 1) * (i + 2) / 2) - 1;
  mat[diag] *= value;
}
// Sets every diagonal element of mat to 'value'.
// Fix: the original wrote the literal 1 and silently ignored the 'value'
// argument, so _set_diag(mat, v, d) behaved like _set_diag(mat, 1, d).
// NOTE(review): only i < d.rows is checked, so this assumes d.rows <= d.cols
// (square matrices in practice) — confirm against callers.
template<typename Real>
__global__
static void _set_diag(Real* mat, Real value, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda index = i + i*d.stride;
  if ( i < d.rows ) {
    mat[index] = value;
  }
}
// Sets every diagonal element of a packed lower-triangular matrix to 'value'
// (diagonal element i lives at packed index (i+1)*(i+2)/2 - 1).
template<typename Real>
__global__
static void _set_diag_packed(Real* mat, Real value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim) return;
  mat[((i + 1) * (i + 2) / 2) - 1] = value;
}
// Adds 'value' to every diagonal element of a packed lower-triangular matrix
// (diagonal element i lives at packed index (i+1)*(i+2)/2 - 1).
template<typename Real>
__global__
static void _add_diag_packed(Real* mat, Real value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim) return;
  mat[((i + 1) * (i + 2) / 2) - 1] += value;
}
// Fills every element of mat with 'value'.
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] = value;
}
// Zeroes the strictly-upper triangle (col > row) of mat.
// Here i (x) is the column index and j (y) the row index.
// Fix: also bound j by d.rows — the original only checked i < d.cols && j < i,
// so with rows < cols an oversized grid could write past the last row.
// Behavior for in-range elements is unchanged.
template<typename Real>
__global__
static void _set_zero_above_diag(Real* mat, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j * d.stride;
  if (i < d.cols && j < d.rows && j < i)
    mat[index] = 0.0;
}
// Adds the scalar 'value' to every element of mat.
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] += value;
}
// Multiplies every element of mat by the scalar 'value'.
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] *= value;
}
// Element-wise in-place natural log: mat[i][j] = log(mat[i][j]).
// No guard against non-positive inputs (log then yields -inf/NaN).
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int32_cuda idx = col + row * d.stride;
  mat[idx] = log(mat[idx]);
}
// Element-wise in-place multiply: mat .*= A, where A may have its own
// row stride.
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dst_d.cols || row >= dst_d.rows) return;
  mat[col + row * dst_d.stride] *= A[col + row * src_stride];
}
// Element-wise in-place maximum: mat = max(mat, A), where A may have its
// own row stride. The (a > b ? a : b) form is kept so NaN handling matches
// the original exactly.
template<typename Real>
__global__
static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dst_d.cols || row >= dst_d.rows) return;
  int32_cuda di = col + row * dst_d.stride;
  Real a = mat[di], b = A[col + row * src_stride];
  mat[di] = (a > b ? a : b);
}
// Element-wise in-place vector multiply: v .*= a.
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v[idx] *= a[idx];
}
// Scales each column c of mat by scale[c].
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] *= scale[col];
}
// Scales each row r of mat by scale[r].
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] *= scale[row];
}
// In-place multiply where each group of 'group_size' consecutive columns of
// y shares one column of x: y(r, c) *= x(r, c / group_size).
template<typename Real>
__global__
static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d,
                                int src_stride, int group_size) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= d.rows || col >= d.cols) return;
  y[col + row * d.stride] *= x[col / group_size + row * src_stride];
}
/// y is the derivative we will output; vec is the input we're computing
/// the group p-norm on, "norm" is the previously computed group p-norm.
template<typename Real>
__global__
static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm,
                              MatrixDim d, int src_stride, int group_size, Real power) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows  && i < d.cols ) {
    int dst_index = i + j * d.stride,
        src_index = i / group_size + j * src_stride; // one norm value per group of columns
    Real vec_element = vec[dst_index], // this is the element of the original vector.
        norm_element = norm[src_index]; // this is the pnorm
    Real vec_element_sign = (vec_element > 0 ? 1 : -1);
    Real ans;
    // d/dx_i (sum_j |x_j|^p)^(1/p) = sign(x_i) * |x_i|^(p-1) * norm^(1-p)
    if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin.
    else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) *
        pow(norm_element, 1 - power);
    deriv[dst_index] = ans;
  }
}
/// Set each element to y = (x == orig ? changed : x).
// Rewrites every element of vec equal to 'orig' with 'changed'.
template<typename Real>
__global__
static void _replace_value(Real *vec, int dim, Real orig, Real changed) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  if (vec[idx] == orig)
    vec[idx] = changed;
}
// Divides each row r of mat by vec_div[r]. The reciprocal of each block-row's
// divisor is computed once (by the threadIdx.x == 0 thread) and shared via
// shared memory.
// NOTE(review): assumes blockDim.y <= 16 (capacity of 'inv') — confirm at the
// launch site.
// Fix: the original returned early for j >= d.rows *before* __syncthreads(),
// so blocks straddling the bottom edge of the matrix executed a divergent
// barrier (undefined behavior). The guards now wrap the accesses instead,
// and every thread reaches the barrier.
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  //invert divider in shared memory
  __shared__ Real inv[16];
  if(threadIdx.x==0 && j < d.rows) {
    inv[threadIdx.y] = 1.0/vec_div[j];
  }
  __syncthreads();
  //multiply elements
  if (i < d.cols && j < d.rows)
    mat[index] *= inv[threadIdx.y];
}
// dst += alpha * src (element-wise); src may have its own row stride.
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  dst[col + row * d.stride] += alpha * src[col + row * src_stride];
}
// dst += alpha * src^T : dst(row, col) += alpha * src(col, row).
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  dst[col + row * d.stride] += alpha * src[row + col * src_stride];
}
// Element-wise dst = A * B / C, with the convention that a zero divisor
// passes A through unchanged: dst = (C == 0) ? A : A * B / C.
template<typename Real>
__global__
static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int32_cuda idx = col + row * d.stride;
  dst[idx] = (C[idx] == 0) ? A[idx] : A[idx] * B[idx] / C[idx];
}
// Given a matrix input S (not packed!) and a lower-triangular matrix L,
// this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion.
// The i index is the row of the destination S and the j the column (although of
// course the output is symmetric so it doesn't matter in a sense). The main point
// of this is to make use of various symmetries and zero-ness.
// Each thread (i >= j) computes one lower-triangle entry of alpha * T^T T
// and writes it to both symmetric positions of S.
// Fix: for i == j, output_index1 == output_index2 and the original's second
// store read the *already updated* value, applying "alpha*sum + beta*(.)"
// twice to the diagonal (wrong whenever beta != 0). Reading both old values
// before either store makes the diagonal update idempotent while leaving
// off-diagonal behavior (which may see a non-symmetric input S) unchanged.
template<typename Real>
__global__
static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S,
                        MatrixDim sdim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i >= sdim.rows || j > i) return;
  // this thread computes the dot-product of the i'th column of
  // T with the j'th column of T. The values we're multiplying
  // are only nonzero for row-index k greater or equal to
  // max(i, j), which equals i.
  Real sum = 0.0;
  for (int k = i; k < sdim.rows; k++) {
    int i_index = i + tdim.stride * k,
        j_index = j + tdim.stride * k;
    sum += T[i_index] * T[j_index];
  }
  int output_index1 = i * sdim.stride + j,
      output_index2 = j * sdim.stride + i;
  Real old1 = S[output_index1], old2 = S[output_index2];
  S[output_index1] = alpha * sum + beta * old1;
  S[output_index2] = alpha * sum + beta * old2;
}
// dst(row, c) = alpha * col[row] + beta * dst(row, c): blends the column
// vector 'col' into every column of dst.
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
  int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  int32_cuda idx = c + r * d.stride;
  dst[idx] = alpha * col[r] + beta * dst[idx];
}
// dst(r, col) = alpha * row[col] + beta * dst(r, col): blends the row
// vector 'row' into every row of dst.
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
  int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  int32_cuda idx = c + r * d.stride;
  dst[idx] = alpha * row[c] + beta * dst[idx];
}
// Zeroes every element of mat whose corresponding mask entry is 0; the mask
// may have its own stride.
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
  int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= dmat.cols || r >= dmat.rows) return;
  if (mask[c + r * dmask.stride] == 0)
    mat[c + r * dmat.stride] = 0;
}
/*
* CuVector
*/
// very limited application!
template<typename Real>
__global__
static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) {
  // Rescales each v[i] so that the ratio a[i]/param_3 is pushed toward
  // param_1, with the correction factor capped at param_2. If any ratio
  // falls outside [0, 1.01), *flag is set to 1 and that thread bails out.
  // NOTE(review): other threads continue regardless — *flag is a sticky
  // error marker the host is presumably expected to check; confirm callers.
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if ( i < dim ) {
    Real ratio = a[i] / param_3;
    if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) {
      *flag = 1;
      return;
    }
    if ( ratio < param_1 ) {
      // ratio below target: divide v[i] by a factor in (1, param_2].
      Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio);
      v[i] = v[i] / factor;
    } else if ( ratio > param_1 ) {
      // ratio above target: multiply v[i] by a factor in (1, param_2].
      Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1);
      v[i] = v[i] * factor;
    }
  }
}
// Copies a Real vector into a double vector (widening conversion).
template<typename Real>
__global__
static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v_out[idx] = static_cast<double>(v_in[idx]);
}
// This kernel writes a copy of the vector "v_in" to each row of the matrix
// "m_out". the dimension of v_in should be equal to the #columns of m_out. In
// this kernel, following the new pattern, x corresponds to row-index and y to
// column-index.
// Broadcasts v_in across the rows of m_out: every row becomes a copy of
// v_in (whose length must equal d.cols). Following the new pattern, x is
// the row index and y the column index.
template<typename Real>
__global__
static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= d.rows || col >= d.cols) return;
  m_out[row * d.stride + col] = v_in[col];
}
// Copies a Real vector into a float vector (possibly narrowing conversion).
template<typename Real>
__global__
static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v_out[idx] = static_cast<float>(v_in[idx]);
}
template<typename Real>
__global__
static void _vec_min(const Real* v, Real* value, int dim) {
  // Writes min(v[0..dim)) to *value. Each of the first CU1DBLOCK threads
  // scans a contiguous chunk, then _min_reduce combines the per-thread
  // minima in shared memory.
  // NOTE(review): effectively assumes one block of exactly CU1DBLOCK
  // threads — extra threads return here, before the __syncthreads() inside
  // _min_reduce, which would make the barrier divergent; confirm the launch.
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i >= CU1DBLOCK) return;
  __shared__ Real row_data[CU1DBLOCK];
  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; // chunk per thread (ceil-div)
  Real min = 1.0 / 0.0; // infinity.
  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
     Real v_j = v[j];
     if (v_j < min) min = v_j;
  }
  row_data[i] = min;
  __syncthreads();
  //get the sum
  // (all surviving threads store the identical reduced value).
  *value = _min_reduce(row_data);
}
template<typename Real>
__global__
static void _vec_max(const Real* v, Real* value, int dim) {
  // Computes the maximum element of v and writes it to *value.
  // Intended launch: a single x-block of CU1DBLOCK threads; each thread
  // serially reduces one contiguous chunk, then a shared-memory reduction
  // combines the partial maxima.
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(blockIdx.y > 0) return;
  __shared__ Real row_data[CU1DBLOCK];
  // NOTE(review): this early return precedes the __syncthreads() below;
  // assumes blockDim.x == CU1DBLOCK so it never fires for part of a block.
  if(i >= CU1DBLOCK) return;
  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;  // ceil(dim / CU1DBLOCK)
  Real max = -1.0 / 0.0; // -infinity.
  // Serial pass over this thread's chunk of the vector.
  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
    Real v_j = v[j];
    if (v_j > max) max = v_j;
  }
  row_data[i] = max;
  __syncthreads();
  //get the sum
  // (actually the maximum; every surviving thread stores the same value)
  *value = _max_reduce(row_data);
}
// _trace_mat_mat expects to be called with 1 blocks, each of dimension
// CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x],
// i.e. value[0 through 0].
template<typename Real, int num_blocks>
__global__
static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) {
  // Partial sums of trace(A * B), where B is accessed transposed relative
  // to A via B_stride.  Each of the num_blocks blocks writes one partial
  // sum to value[blockIdx.x]; the host sums those partials.
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): these guards use '>' where '>=' looks intended; with the
  // expected launch (num_blocks blocks of CU1DBLOCK threads) they can never
  // fire anyway, so behavior is unaffected -- confirm before reusing with
  // other launch configurations.
  if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return;
  int num_elements = dA.rows * dA.cols,
      num_threads = CU1DBLOCK * num_blocks;
  // Each thread handles one contiguous chunk of the flattened matrix.
  int block_size = (num_elements + num_threads - 1) / num_threads;
  int loop_start = i * block_size, loop_end = (i + 1) * block_size;
  if (loop_end > num_elements)
    loop_end = num_elements;
  Real sum = 0.0;
  for (int j = loop_start; j < loop_end; j++) {
  // for (int j = i; j < num_elements; j += num_threads) {
    int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is
                                              // col-index in A; in B, it's reversed.
    int index_A = col + row * dA.stride,
        index_B = row + col * B_stride;
    sum += A[index_A] * B[index_B];
  }
  // Block-level tree reduction of the per-thread partial sums.
  __shared__ Real row_data[CU1DBLOCK];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  Real ans = _sum_reduce(row_data);
  if (threadIdx.x == 0)
    value[blockIdx.x] = ans;
}
// _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension
// CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x],
// i.e. value[0 through 3].
template<typename Real, int num_blocks>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) {
  // Partial sums of trace(A * B^T), i.e. sum of the element-wise product of
  // A and B (same orientation).  Each block writes one partial sum to
  // value[blockIdx.x]; the host sums those partials.
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): '>' where '>=' looks intended; harmless under the
  // expected launch of num_blocks blocks of CU1DBLOCK threads.
  if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return;
  int num_elements = dA.rows * dA.cols,
      num_threads = CU1DBLOCK * num_blocks;
  // int block_size = (num_elements + num_threads - 1) / num_threads;
  // int loop_start = i * block_size, loop_end = (i + 1) * block_size;
  // if (loop_end > num_elements)
  //  loop_end = num_elements;
  Real sum = 0.0;
  // for (int j = loop_start; j < loop_end; j++) {
  // Grid-stride style loop over the flattened elements.
  for (int j = i; j < num_elements; j += num_threads) {
    int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is
                                              // col-index in A; in B, it's reversed.
    int index_A = col + row * dA.stride,
        index_B = col + row * B_stride;
    sum += A[index_A] * B[index_B];
  }
  // Block-level tree reduction of the per-thread partial sums.
  __shared__ Real row_data[CU1DBLOCK];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  Real ans = _sum_reduce(row_data);
  if (threadIdx.x == 0)
    value[blockIdx.x] = ans;
}
// Adds diag(M N) to v, where M and N are matrices. We supply row_stride and
// col_stride arguments for M and N, and swapping them allows us to transpose
// those matrices. Note: we imagine row-major indexing here, just like Kaldi
// and CBLAS (but unlike CUBLAS).
// This kernel expects the blockDim to be (CU1DBLOCK, 1) and the
// gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element.
// threads_per_element should be a power of 2.
template<typename Real>
__global__
static void _add_diag_mat_mat(
    Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride,
    int M_col_stride, const Real *N, int N_row_stride, int N_col_stride,
    int threads_per_element, Real beta) {
  // Computes v = beta * v + alpha * diag(M * N); row/col strides let the
  // caller present M and N transposed.  See the comment block above for
  // the launch-shape contract.
  // we actually assume blockDim.x == CU1DBLOCK here.
  // Each diagonal element of v is processed by "threads_per_element" threads.
  __shared__ Real temp_data[CU1DBLOCK];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int v_idx = i / threads_per_element,   // v_idx is the index into v that we are supposed to
      sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells
                                         // us which block of elements we sum up.
  // NOTE(review): threads returning here skip the __syncthreads() calls
  // below; presumably threads_per_element divides CU1DBLOCK and the grid is
  // sized so whole groups fall out together -- confirm against callers.
  if (v_idx >= v_dim) return;
  Real sum = 0.0;
  // Strided partial dot-product of row v_idx of M with column v_idx of N.
  for (int j = sub_idx; j < M_cols; j += threads_per_element) {
    int M_index = v_idx * M_row_stride + j * M_col_stride,
        N_index = j * N_row_stride + v_idx * N_col_stride;
    sum += M[M_index] * N[N_index];
  }
  temp_data[threadIdx.x] = sum;
  // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data
  // that we want to sum up.
  // The following is a tree-based reduction of the elements of temp_data from
  // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx".
  __syncthreads();
  int num_total_threads = threads_per_element;
  while (num_total_threads > 1) {
    int half_point = ((1 + num_total_threads) >> 1);
    if (sub_idx < half_point) {
      Real temp = 0.0;
      if (sub_idx + half_point < num_total_threads) {
        temp = temp_data[threadIdx.x + half_point];
      }
      temp_data[threadIdx.x] += temp;
    }
    __syncthreads();
    num_total_threads = half_point;
  }
  // The first thread of each group writes the blended result.
  if (sub_idx == 0) {
    v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x];
  }
}
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) {
  // v = alpha * (x .* y) + beta * v, element-wise.  One thread per element.
  int32_cuda idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= dim) return;
  v[idx] = alpha * x[idx] * y[idx] + beta * v[idx];
}
template<typename Real>
__global__
static void _copy_col_from_mat(Real* v, int col, const Real* mat, MatrixDim dmat, int dim) {
  // Copies column 'col' of 'mat' into the vector v (one thread per row).
  int32_cuda row = blockDim.x * blockIdx.x + threadIdx.x;
  if (row >= dim) return;
  v[row] = mat[col + row * dmat.stride];
}
template<typename Real>
__global__
static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) {
  // Copies column 'col' of 'mat' into v, converting to double precision.
  int32_cuda row = blockDim.x * blockIdx.x + threadIdx.x;
  if (row >= dim) return;
  v[row] = static_cast<double>(mat[col + row * dmat.stride]);
}
template<typename Real>
__global__
static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) {
  // Copies column 'col' of 'mat' into v, converting to single precision.
  int32_cuda row = blockDim.x * blockIdx.x + threadIdx.x;
  if (row >= dim) return;
  v[row] = static_cast<float>(mat[col + row * dmat.stride]);
}
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
  // In-place exponential of each vector element.
  int32_cuda idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= dim) return;
  v[idx] = exp(v[idx]);
}
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
  // In-place natural log.  A negative element sets *flag to 1 and is left
  // unmodified (the write race is benign: all writers store the same value).
  int32_cuda idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= dim) return;
  if (v[idx] < 0) {
    *flag = 1;
    return;
  }
  v[idx] = log(v[idx]);
}
template<typename Real>
__global__
static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) {
  // Cross-entropy-style objective and derivative over the sparse
  // supervision list x[0..s): accumulates sum(weight * log(prob)) into t[0]
  // and sum(weight) into t[1], and adds weight / prob into z2 at each
  // supervised (row, label) position.  Intended launch: one block of
  // CU1DBLOCK threads; the s entries are split into near-equal chunks.
  int i = threadIdx.x;
  __shared__ Real tot_objf[CU1DBLOCK];
  __shared__ Real tot_weight[CU1DBLOCK];
  Real tmp_weight_sum = 0;
  Real tmp_tot_objf = 0;
  int size = s / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1
  int loop_start;
  int loop_end;
  // The first 'threshold' threads take chunks of (size + 1) entries; the
  // rest take 'size' entries, covering all s entries exactly once.
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  }
  else {
    loop_start = threshold + i*size;
    loop_end = threshold + (i+1)*size;
  }
  for(int j = loop_start; j< loop_end; j++) {
    int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) );
    int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int));
    Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int));
    tmp_weight_sum += weight;
    Real this_prob = *(z + m * d.stride + label);
    tmp_tot_objf += weight * log(this_prob);
    // NOTE(review): plain += races if the supervision list repeats a
    // (row, label) pair -- the original author flagged this too.
    *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here....
  }
  tot_objf[i] = tmp_tot_objf;
  tot_weight[i] = tmp_weight_sum;
  __syncthreads();
  // Every thread stores the same reduced value, so these racy writes to
  // *t and *(t+1) are benign.
  *t = _sum_reduce(tot_objf);
  __syncthreads();
  *(t+1) = _sum_reduce(tot_weight);
  return;
}
template<typename Real>
__global__
static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int s) {
  // data(row, column) += alpha * weight for every entry in the list
  // x[0..s).  Intended launch: one block of CU1DBLOCK threads; the list is
  // split into near-equal contiguous chunks per thread.
  int i = threadIdx.x;
  if (i >= s)
    return;
  int size = s / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1
  int loop_start;
  int loop_end;
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  }
  else {
    loop_start = threshold + i*size;
    loop_end = threshold + (i+1)*size;
  }
  for(int j = loop_start; j < loop_end; j++) {
    // NOTE(review): plain += races if the list repeats a (row, column)
    // pair; presumably entries are unique -- confirm against callers.
    *(data + x[j].row * dim.stride + x[j].column) += alpha * x[j].weight;
  }
}
template<typename Real>
__global__
static void _matrix_lookup(const Real *data, MatrixDim dim,
                           const Int32Pair *indices,
                           int indices_size, Real *output) {
  // output[i] = data(indices[i].first, indices[i].second).
  // One thread per matrix element; each thread scans the whole index list
  // and writes only the entries naming its own (row, col).
  // NOTE(review): this costs O(rows * cols * indices_size).  A kernel with
  // one thread per list entry would be linear, but would need a different
  // launch configuration on the host side.
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= dim.rows || col >= dim.cols)
    return;
  for (int i = 0; i < indices_size; ++i) {
    if (row == indices[i].first && col == indices[i].second) {
      output[i] = data[row * dim.stride + col];
    }
  }
}
template<typename Real>
__global__
static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) {
  // mask(r, c) = 1.0 where mat1(r, c) == mat2(r, c), else 0.0.
  // Older-style indexing: x -> column, y -> row.
  int32_cuda col = blockDim.x * blockIdx.x + threadIdx.x;
  int32_cuda row = blockDim.y * blockIdx.y + threadIdx.y;
  if (col >= mat1_dim.cols || row >= mat1_dim.rows) return;
  Real a = mat1[col + row * mat1_dim.stride];
  Real b = mat2[col + row * mat2_stride];
  mask[col + row * mask_stride] = (a == b) ? 1.0 : 0.0;
}
template<typename Real>
__global__
static void _vec_sum(Real *v, Real *sum, int dim, int inc) {
  // Sums the strided vector v (elements v[0], v[inc], v[2*inc], ...) into
  // *sum.  Intended launch: one block of CU1DBLOCK threads; the dim
  // logical elements are split into near-equal contiguous chunks.
  int i = threadIdx.x;
  __shared__ Real row_data[CU1DBLOCK];
  if (i >= CU1DBLOCK) return;
  Real tmp_sum = 0;
  int size = dim / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1
  int loop_start;
  int loop_end;
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  }
  else {
    loop_start = threshold + i * size;
    loop_end = threshold + (i+1) * size;
  }
  // Serial partial sum over this thread's chunk.
  for(int j = loop_start; j< loop_end; j++) {
    tmp_sum += v[j * inc];
  }
  row_data[threadIdx.x] = tmp_sum;
  __syncthreads();
  // Every thread stores the same reduced value, so this write is benign.
  *sum = _sum_reduce(row_data);
}
template<typename Real>
__global__
static void _pvec_sum(Real* v, Real* g, int dim, int size) {
  // Partial sums: each thread sums 'size' consecutive elements of v, and
  // each block reduces its threads' partials into g[blockIdx.x].  The host
  // then sums the per-block partials.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int start = size * i;
  // NOTE(review): threads returning here skip both the shared-memory store
  // and the __syncthreads() below, so in a partially-filled last block
  // _sum_reduce() would read uninitialized row_data entries and the
  // barrier would be divergent.  Presumably the launcher sizes the grid so
  // every thread has work -- confirm against the host-side wrapper.
  if (start >= dim) return;
  int end = start + size;
  if (end > dim) end = dim;
  __shared__ Real row_data[CU1DBLOCK];
  Real sum = 0;
  for (int j = start; j < end; j++)
    sum += v[j];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  // Every surviving thread stores the same reduced value.
  g[blockIdx.x] = _sum_reduce(row_data);
}
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
  // Floors each element of v at floor_val; count[i] records whether
  // element i was clipped (1) or left unchanged (0).
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= dim) return;
  bool clipped = (v[idx] < floor_val);
  if (clipped)
    v[idx] = floor_val;
  count[idx] = clipped ? 1 : 0;
}
// Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index.
// this is for no reason, really, I just happened to prefer this
// at the time. [dan]
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
  // Raises each element to 'power' in place, with fast paths for the
  // common exponents 1, 2 and 0.5.  Here x indexes rows, y indexes columns.
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= d.rows || c >= d.cols) return;
  int index = r * d.stride + c;
  Real val = mat[index];
  if (power == 1.0) {
    // identity: nothing to do
  } else if (power == 2.0) {
    mat[index] = val * val;
  } else if (power == 0.5) {
    // Leave negative (and NaN) inputs untouched rather than write NaN.
    if (val >= 0.0)
      mat[index] = sqrt(val);
  } else {
    mat[index] = pow(val, power);
  }
}
template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) {
  // mat(i, j) = |mat(i, j)|^power in place; when include_sign is true, the
  // original sign of a negative element is kept on the result.
  // Here x indexes rows, y indexes columns.
  // Fix: the power == 1.0 cases previously lacked an 'else', so control
  // fell through into the generic pow() branch and recomputed the value
  // just stored (same result, wasted work); the chains are now exclusive.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i * d.stride + j;
  if (i < d.rows && j < d.cols) {
    if (include_sign == true && mat[index] < 0) {
      if (power == 1.0)
        mat[index] = -std::abs(mat[index]);
      else if (power == 2.0) {
        mat[index] = -mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = -sqrt(std::abs(mat[index]));
      } else {
        mat[index] = -pow(std::abs(mat[index]), power);
      }
    } else {
      if (power == 1.0)
        mat[index] = std::abs(mat[index]);
      else if (power == 2.0) {
        mat[index] = mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = sqrt(std::abs(mat[index]));
      } else if (power < 0.0 && mat[index] == 0.0) {
        mat[index] = 0.0;  // avoid pow(0, negative) -> inf
      } else {
        mat[index] = pow(std::abs(mat[index]), power);
      }
    }
  }
}
// Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index.
// this is for no reason, really, I just happened to prefer this
// at the time. [dan]
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
  // Step function in place: mat(i, j) = 1.0 if mat(i, j) > 0, else 0.0.
  // Here x indexes rows, y indexes columns.
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= d.rows || c >= d.cols) return;
  int idx = r * d.stride + c;
  mat[idx] = (mat[idx] > 0.0) ? 1.0 : 0.0;
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
  // Clamps each element from below at floor_val, in place.
  // Older-style indexing: x -> column, y -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int idx = col + row * d.stride;
  if (mat[idx] < floor_val)
    mat[idx] = floor_val;
}
template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  // Column gather: dst(r, c) = src(r, reorder[c]) when reorder[c] >= 0,
  // else 0.  Newer-style indexing: x -> row, y -> column.
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dst_dim.rows || c >= dst_dim.cols) return;
  int src_col = reorder[c];
  int dst_index = r * dst_dim.stride + c;
  dst[dst_index] = (src_col >= 0) ? src[r * src_stride + src_col] : 0.0;
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  // Row gather: dst(r, c) = src(reorder[r], c) when reorder[r] >= 0,
  // else 0.  Newer-style indexing: x -> row, y -> column.
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dst_dim.rows || c >= dst_dim.cols) return;
  int src_row = reorder[r];
  int dst_index = r * dst_dim.stride + c;
  dst[dst_index] = (src_row >= 0) ? src[src_row * src_stride + c] : 0;
}
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
  // Clamps each element from above at ceiling_val, in place.
  // Older-style indexing: x -> column, y -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int idx = col + row * d.stride;
  if (mat[idx] > ceiling_val)
    mat[idx] = ceiling_val;
}
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
  // Accumulates vec_sum[col] += sum over rows of mat(row, col).
  // One block per column (blockIdx.y selects the column since
  // blockDim.y == 1 is required); threadIdx.x enumerates rows.
  // NOTE(review): row_data[j] is written without checking j < d.rows, so
  // this assumes blockDim.x matches the number of rows -- confirm against
  // the launch code.
  int i = blockIdx.y * blockDim.y + threadIdx.y; //col
  int j = blockIdx.x * blockDim.x + threadIdx.x; //row
  if(blockIdx.x > 0) return;
  if(blockDim.y != 1) return;
  __shared__ Real row_data[CU1DBLOCK];
  //copy the input to row_data
  row_data[j] = mat[i+j*d.stride];
  __syncthreads();
  //get the sum
  Real sum = _sum_reduce(row_data);
  __syncthreads();
  //add to previously accumulated sum
  if(threadIdx.x == 0)
    vec_sum[i] += sum;
}
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
  // Accumulates vec_sum[j] += sum over i of mat[i + j*d.stride], i.e. each
  // block (selected by blockIdx.y, since blockDim.y == 1 is required)
  // reduces one stride-row of the matrix; threadIdx.x enumerates the
  // elements within it.
  // NOTE(review): row_data[i] is written without checking i < d.cols, so
  // this assumes blockDim.x matches the row length -- confirm against the
  // launch code.
  int i = blockIdx.x * blockDim.x + threadIdx.x; //row
  int j = blockIdx.y * blockDim.y + threadIdx.y; //col
  if(blockIdx.x > 0) return;
  if(blockDim.y != 1) return;
  __shared__ Real row_data[CU1DBLOCK];
  //copy the input to row_data
  row_data[i] = mat[i+j*d.stride];
  __syncthreads();
  //get the sum
  Real sum = _sum_reduce(row_data);
  __syncthreads();
  //add to previously accumulated sum
  if(threadIdx.x == 0)
    vec_sum[j] += sum;
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
  // data(i, j) = 1 / data(i, j), in place.
  // Older-style indexing: x -> column, y -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int idx = col + row * d.stride;
  data[idx] = 1.0 / data[idx];
}
// matrix-wise, do data = alpha * data + beta * A * B^T,
// where B is a block matrix.
template<typename Real>
__global__
static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols,
                                    int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data,
                                    int B_num_blocks, Real alpha, Real beta) {
  // data = alpha * A * B^T + beta * data, where B is the block matrix
  // described by B_cu_data.  Thread (i, j) handles row i of A against
  // block j of B, writing the output columns that block covers.
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks) return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];
  // BT means B transposed.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride;
    // this_A_row points to the element A[i][BT_row_start], it's really just
    // part of this row of A.
    // Dot-product of the relevant slice of A's row with column k of B^T.
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}
template<typename Real>
__global__
static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols,
                              int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data,
                              int B_num_blocks, Real alpha, Real beta) {
  // data = alpha * A * B + beta * data, where B is the block matrix
  // described by B_cu_data.  Thread (i, j) handles row i of A against
  // block j of B, writing the output columns that block covers.
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks) return;
  const CuBlockMatrixData &block_data = B_cu_data[j];
  int B_row_start = block_data.row_offset,
      B_col_start = block_data.col_offset,
      B_num_rows = block_data.matrix_dim.rows,
      B_num_cols = block_data.matrix_dim.cols,
      B_row_stride = block_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
  for (int k = 0; k < B_num_cols; k++) {
    const Real *this_B_col = B_data + k;
    const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride;
    // this_A_row points to the element A[i][B_row_start], it's really just
    // part of this row of A.
    // Dot-product of the relevant slice of A's row with column k of B.
    Real sum = 0.0;
    for (int l = 0; l < B_num_rows; l++) // l indexes rows of B.
      sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + B_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}
// For a block matrix B, does B = alpha * C * D + beta * B.
// the (x,y,z) indices are the block index, then the row
// and column indices within the block. Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code. The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
template<typename Real>
__global__
static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks,
                               const Real *C_data, int C_num_cols,
                               int C_row_stride, int C_col_stride,
                               const Real *D_data,
                               int D_row_stride, int D_col_stride,
                               Real alpha, Real beta) {
  // For each block b of the block matrix B: B_b = alpha * C * D + beta * B_b,
  // restricted to the (row, col) region that block occupies in the full
  // matrix.  See the comment above the kernel for how transposition of C
  // and D is encoded via the strides.
  int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B.
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block
  int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block
  if (b >= num_blocks) return;
  const CuBlockMatrixData &block_data = B_cu_data[b];
  if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
    return; // we're outside the dimensions of the b'th block.
  // B_elem is the element of B we're writing to.
  Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) +
      i * block_data.matrix_dim.stride + j;
  Real B_val = *B_elem;
  // B_row and B_col are the (row, col) index into the full matrix B.
  int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j;
  const Real *C_row_data = C_data + C_row_stride * B_row,
      *D_col_data = D_data + D_col_stride * B_col;
  // Dot-product of row B_row of C with column B_col of D.
  Real sum = 0.0;
  for (int k = 0; k < C_num_cols; k++) {
    sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
  }
  *B_elem = alpha * sum + beta * B_val;
}
template<typename Real>
__global__
static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols,
                                         int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data,
                                         int B_num_blocks, Real alpha, Real beta) {
  // data = alpha * A * B^T + beta * data, with B a block matrix.
  // NOTE(review): this body is identical to _add_mat_blockmat_trans above;
  // kept as a separate entry point, but a candidate for deduplication.
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks) return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];
  // BT means B transposed.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride;
    // this_A_row points to the element A[i][BT_row_start], it's really just
    // part of this row of A.
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}
/*
template<typename Real>
__global__
static void _block_conv_mat(Real *C_block, int C_row_stride, int C_num_rows,
const Real *A_block, int A_block_num_rows, int A_block_num_cols,
const Real *B_block, int B_block_num_rows, int B_block_num_cols) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index for A
int j = blockIdx.y * blockDim.y + threadIdx.y; // row_index into block c
int k = blockIdx.z * blockDim.z + threadIdx.z; // col_indx into block c
if (i > C_num_rows) return;
if (j >= (A_block_num_rows - B_block_num_rows + 1) ||
k >= (A_block_num_cols - B_block_num_cols + 1)) return; // we are outside dimension of block B;
Real sum = 0;
int A_index_const = i*(A_block_num_rows * A_block_num_cols)+j*A_block_num_cols+k;
for (int row = 0; row < B_block_num_rows; row++) {
for (int col = 0; col < B_block_num_cols; col++) {
int A_index = A_index_const + row * A_block_num_cols + col;
sum += A_block[A_index] * B_block[row * B_block_num_rows + col];
}
}
C_block[i * C_row_stride + j * (A_block_num_cols-B_block_num_cols+1) + k] = sum;
}
*/
template<typename Real>
__global__
static void _block_conv_mat(Real *C, int C_row_stride, int C_block_row_stride,
                            int C_block_num_rows, int C_block_num_cols,
                            const Real *A, int block_dim_x, int A_num_rows,
                            int A_block_num_rows, int A_block_num_cols, const Real *B, int block_dim_y,
                            int B_block_num_rows, int B_block_num_cols) {
  // Blocked 2-D correlation/convolution: each output element of C is the
  // sum over a B-sized window of the corresponding A block, with the
  // filter staged in shared memory.
  // NOTE(review) -- several points to confirm against the callers:
  //  * 'filter' holds CU1DBLOCK entries, so this requires
  //    B_block_num_rows * B_block_num_cols <= CU1DBLOCK;
  //  * every thread writes the filter into the SAME shared array; threads
  //    with different j would load different filters over each other, so
  //    presumably j is uniform within a block;
  //  * B is indexed with 'row * B_block_num_rows + col' -- for non-square
  //    filters this looks like it should be B_block_num_cols;
  //  * __mul24 is a 24-bit *integer* multiply applied here to Real
  //    operands, which looks suspicious;
  //  * the early returns above precede __syncthreads(), a divergent-
  //    barrier hazard unless the launch guarantees uniform predicates.
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index for A
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block_index into blocks A and B
  int k = blockIdx.z * blockDim.z + threadIdx.z; // indx into each block of c
  if (i > A_num_rows) return;
  if (j >= (block_dim_x*block_dim_y) ||
      (k >= C_block_row_stride)) return; // we are outside dimension of block B;
  int block_num = j / block_dim_y;
  int filter_num = j % block_dim_y;
  int C_block_row_num = k / C_block_num_cols;
  int C_block_col_num = k % C_block_num_cols;
  Real sum = 0;
  int A_index_const = (i*block_dim_x + block_num) * (A_block_num_rows * A_block_num_cols)+
      C_block_row_num * A_block_num_cols + C_block_col_num;
  int B_index_const = j * B_block_num_rows * B_block_num_cols;
  __shared__ Real filter[CU1DBLOCK];//[B_block_num_rows * B_block_num_cols];
  // Stage the filter coefficients for this block into shared memory.
  for (int row = 0; row < B_block_num_rows; row++)
    for (int col = 0; col < B_block_num_cols; col++)
      filter[row*B_block_num_cols+col] = B[B_index_const + row * B_block_num_rows + col];
  __syncthreads();
  // Accumulate the windowed sum of products.
  for (int row = 0; row < B_block_num_rows; row++) {
    for (int col = 0; col < B_block_num_cols; col++) {
      int A_index = A_index_const + row * A_block_num_cols + col;
      sum += __mul24(A[A_index], filter[row * B_block_num_rows + col]);
      //sum += __mul24(A[A_index], B[B_index_const + row * B_block_num_rows + col]);
    }
  }
  __syncthreads();
  int C_index = i * C_row_stride + j * C_block_row_stride + k;
  C[C_index] = sum;
}
// Since this is a newer kernel, x is the row-index and y is the
// column-index.
template<typename Real>
__global__
static void _sum_column_ranges(Real *data, MatrixDim dim,
                               const Real *src_data,
                               MatrixDim src_dim,
                               const Int32Pair *indices) {
  // data(r, c) = sum of src_data(r, indices[c].first .. indices[c].second-1).
  // Newer-style indexing: x -> row, y -> column.
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dim.rows || c >= dim.cols)
    return;
  const Real *src_row = src_data + r * src_dim.stride;
  Real total = 0.0;
  for (int k = indices[c].first; k < indices[c].second; k++)
    total += src_row[k];
  data[r * dim.stride + c] = total;
}
template<typename Real>
__global__
static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) {
  // Soft hinge (softplus): y = log(1 + exp(x)), computed via log1p; for
  // x >= 10 the function is approximated by y = x to avoid overflow.
  // Older-style indexing: x-dim -> column, y-dim -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real val = x[col + row * src_stride];
  y[col + row * d.stride] = (val >= 10.0) ? val : log1p(exp(val));
}
template<typename Real>
__global__
static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride,
                         int group_size, Real power) {
  // Group p-norm: y(j, i) = (sum over the i'th group of 'group_size'
  // consecutive elements of row j of x, of |x|^power)^(1/power).
  // If the direct computation yields NaN (overflow/underflow), it is
  // redone with the group rescaled by its maximum absolute value.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols) {
    int dst_index = i + j * d.stride;
    Real tmp = 0;
    int src_begin_index = i * group_size + j * src_stride;
    int src_end_index = src_begin_index + group_size;
    // First attempt: direct sum of |x|^power.
    for (int src_index = src_begin_index; src_index < src_end_index;
         src_index ++) {
      tmp += pow(std::abs(x[src_index]), power);
    }
    tmp = pow(tmp, Real(1.0 / power));
    if (!isnan(tmp)) {
      y[dst_index] = tmp;
    } else {
      // Fallback: rescale by the largest |value| in the group, compute the
      // norm of the scaled values, then undo the scaling.
      Real max_value = x[src_begin_index], min_value = max_value;
      for (int src_index = src_begin_index + 1;
           src_index < src_end_index; src_index ++) {
        if (x[src_index] > max_value)
          max_value = x[src_index];
        if (x[src_index] < min_value)
          min_value = x[src_index];
      }
      tmp = 0.0;
      Real max_abs_value = (max_value > -min_value ?
                            max_value : -min_value); // let max_value be the
                                                     // largest abs(value)
      if (max_abs_value == 0) {
        y[dst_index] = 0.0;
      } else {
        for (int src_index = src_begin_index;
             src_index < src_end_index; src_index ++) {
          Real x_scaled = x[src_index] / max_abs_value;
          tmp += pow(std::abs(x_scaled), Real(power));
        }
        y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
      }
    }
  }
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
  // Logistic sigmoid: y = 1 / (1 + exp(-x)).
  // Older-style indexing: x-dim -> column, y-dim -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real val = x[col + row * src_stride];
  y[col + row * d.stride] = 1.0 / (1.0 + exp(-val));
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int src_stride) {
  // Back-propagation through the sigmoid: eout = y .* (1 - y) .* e.
  // Older-style indexing: x-dim -> column, y-dim -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int s = col + row * src_stride;
  eout[col + row * d.stride] = y[s] * (1.0 - y[s]) * e[s];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
  // tanh computed as (exp(2x) - 1) / (exp(2x) + 1), saturating to 1.0 when
  // exp(2x) overflows to infinity.
  // Older-style indexing: x-dim -> column, y-dim -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real exp_2x = exp(2.0 * x[col + row * src_stride]);
  Real res;
  if (isinf(exp_2x)) {
    res = 1.0;
  } else {
    res = (exp_2x - 1.0) / (exp_2x + 1.0);
  }
  y[col + row * d.stride] = res;
}
template<typename Real>
__global__
static void _relu(Real*y, const Real*x, MatrixDim d, int src_stride) {
  // Rectified linear unit: y = max(x, 0).
  // Older-style indexing: x-dim -> column, y-dim -> row.
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real val = x[col + row * src_stride];
  y[col + row * d.stride] = (val > 0) ? val : 0;
}
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d) {
  // Back-propagation through tanh: eout = (1 - y^2) .* e.
  // Older-style indexing: x-dim -> column, y-dim -> row.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int idx = col + row * d.stride;
  eout[idx] = (1.0 - y[idx] * y[idx]) * e[idx];
}
template<typename Real>
__global__
static void _diff_relu(Real*eout, const Real*e, const Real*y, MatrixDim d) {
  // Back-propagation through ReLU: pass the gradient where the output was
  // positive, zero elsewhere.  x-dim -> column, y-dim -> row.
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int32_cuda idx = col + row * d.stride;
  eout[idx] = (y[idx] > 0) ? e[idx] : 0;
}
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
  // Row-wise softmax; one thread processes one whole row serially.
  // The max and sum are accumulated in double for numerical robustness.
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= d.rows) return;
  const Real *x_row = x + row * d.stride;
  Real *y_row = y + row * d.stride;
  // Pass 1: copy the input and find the row maximum (for stability).
  double max = -1e20;
  for (int32_cuda c = 0; c < d.cols; c++) {
    if (max < x_row[c]) max = x_row[c];
    y_row[c] = x_row[c];
  }
  // Pass 2: shift by the max, exponentiate, accumulate the sum.
  double sum = 0.0;
  for (int32_cuda c = 0; c < d.cols; c++) {
    y_row[c] = exp(y_row[c] - max);
    sum += y_row[c];
  }
  // Pass 3: normalize so the row sums to 1.
  for (int32_cuda c = 0; c < d.cols; c++) {
    y_row[c] /= sum;
  }
}
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
  // Row-wise softmax; one block per row, reduced cooperatively in shared
  // memory (max for numerical stability, then sum for normalization).
  // NOTE(review): the unguarded accesses at plain threadIdx.x below assume
  // blockDim.x <= d.cols; otherwise they would read/write past the row.
  // Confirm against the launch code.
  int j = blockIdx.x;
  int THREADS = blockDim.x;
  if (j >= d.rows) return;
  __shared__ Real aux[CU1DBLOCK];
  int steps = (d.cols - 1) / THREADS + 1;
  //copy input to aux
  aux[threadIdx.x] = x[threadIdx.x+j*d.stride];
  // Each thread takes the max over its strided subset of the row.
  for(int i=1; i<steps; ++i) {
    if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride])
      aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride];
  }
  //get the maximum value
  int nTotalThreads = THREADS;
  __syncthreads();
  // Shared-memory tree reduction to aux[0] = row max.
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint])
        aux[threadIdx.x] = aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  Real max = aux[0];
  __syncthreads();
  // subtract max, apply exp, sum up...
  y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max);
  aux[threadIdx.x] = y[threadIdx.x+j*d.stride];
  for(int i=1; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max);
      aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride];
    }
  }
  nTotalThreads = THREADS;
  __syncthreads();
  // Shared-memory tree reduction to aux[0] = row sum of exponentials.
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads)
        aux[threadIdx.x] += aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  Real sum = aux[0];
  __syncthreads();
  //normalize by sum...
  for(int i=0; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum;
    }
  }
}
// Frame splicing: output column group k (of width d_in.cols) takes its data
// from input row (j + off[k]), with the source row clamped into range so
// edge frames are replicated. One thread per output element.
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d_out.cols || row >= d_out.rows) return;
  int32_cuda src_col = col % d_in.cols;          // column within the input frame
  int32_cuda src_row = row + off[col / d_in.cols];  // frame offset for this group
  // Clamp to valid rows (replicate first/last frame at the boundaries).
  if (src_row < 0) src_row = 0;
  if (src_row >= d_in.rows) src_row = d_in.rows - 1;
  y[col + row * d_out.stride] = x[src_col + src_row * d_in.stride];
}
// Symmetrize a square matrix into packed ("sp") lower-triangular storage:
// y_packed(j,i) = 0.5 * (x(j,i) + x(i,j)) for i <= j.
template<typename Real>
__global__
static void _take_mean(const Real* x, Real* y, MatrixDim d_in) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i > j || j >= d_in.rows) return;   // only the lower triangle writes
  int32_cuda lower = i + j * d_in.stride;  // element (row j, col i)
  int32_cuda upper = j + i * d_in.stride;  // its transpose (row i, col j)
  // Packed row-major lower-triangular index of (j, i).
  y[(j * (j + 1) / 2) + i] = 0.5 * (x[lower] + x[upper]);
}
// Copy the lower triangle of a square matrix into packed ("sp") storage.
template<typename Real>
__global__
static void _take_lower(const Real* x, Real* y, MatrixDim d_in) {
  int r = blockIdx.x * blockDim.x + threadIdx.x; // row-index
  int c = blockIdx.y * blockDim.y + threadIdx.y; // col-index
  if (r < d_in.rows && c <= r) {
    // Packed lower-triangular position of element (r, c).
    y[(r * (r + 1) / 2) + c] = x[r * d_in.stride + c];
  }
}
// Copy the upper triangle of a square matrix into packed ("sp") storage
// (transposing it into the packed lower-triangular layout).
template<typename Real>
__global__
static void _take_upper(const Real* x, Real* y, MatrixDim d_in) {
  int32_cuda r = blockIdx.x * blockDim.x + threadIdx.x; // row-index
  int32_cuda c = blockIdx.y * blockDim.y + threadIdx.y; // col-index
  if (c < r || c >= d_in.rows) return;   // only the upper triangle reads
  y[(c * (c + 1) / 2) + r] = x[r * d_in.stride + c];
}
// Extract the diagonal of a packed lower-triangular matrix into a vector.
template<typename Real>
__global__
static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < dim) {
    // Diagonal element i sits at the end of packed row i: (i+1)(i+2)/2 - 1.
    y[i] = x[((i + 1) * (i + 2) / 2) - 1];
  }
}
// Expand a packed symmetric matrix x into a full square matrix y.
// Packed storage holds only the lower triangle, so entries above the
// diagonal are read from the mirrored (transposed) packed position.
template<typename Real>
__global__
static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dim.rows || c >= dim.cols) return;
  int src = (c <= r) ? (r * (r + 1) / 2) + c   // lower triangle: direct
                     : (c * (c + 1) / 2) + r;  // upper triangle: transpose
  y[r * dim.stride + c] = x[src];
}
// Column-permuted copy: output column i is input column copy_from[i].
// A negative/out-of-range source index writes +inf (1.0/0.0) as a marker.
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d_out.cols || row >= d_out.rows) return;
  int32_cuda dst = col + row * d_out.stride;
  int32_cuda src_col = copy_from[col];
  if (src_col >= 0 && src_col < d_in.cols) {
    y[dst] = x[src_col + row * d_in.stride];
  } else {
    y[dst] = 1.0/0.0;  // deliberate +inf sentinel for invalid indices
  }
}
// Fill a vector of length dim with the constant 1.
template<typename Real>
__global__
static void _one(Real* x, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  x[idx] = 1.0;
}
// Row shuffle: output row j is a copy of input row copy_from[j].
// (No bounds check on the source row; copy_from must hold valid indices.)
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < d_out.cols && row < d_out.rows) {
    int32_cuda src_row = copy_from[row];
    y[col + row * d_out.stride] = x[col + src_row * d_in.stride];
  }
}
// L1 regularization with clipping-to-zero: for each nonzero weight, form the
// signed L1 term (same sign as the weight), simulate the upcoming SGD step
// (w - lr*grad - l1_signed); if that would flip the weight's sign, zero both
// the weight and its gradient so the weight stays exactly at zero, otherwise
// apply only the L1 shrinkage to the weight (the gradient step itself is
// applied elsewhere by the caller).
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows) {
if(wei[index]==0.0) return; //skip L1 if zero weight!
Real l1_signed = l1;
if(wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
Real after = wei[index] -lr*grad[index] -l1_signed;//simulate update
// XOR of the sign predicates detects a sign flip across zero.
if((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
// Per-row arg-max: one block of threads scans row j of mat, finds the column
// of the maximum via the shared-memory reduction helper _max_id_reduce
// (defined elsewhere in this file), and -- if that maximum is >= the current
// vec_val[j] -- stores the value into vec_val[j] and voff+column into
// vec_id[j] (voff lets callers process the matrix in column chunks).
// NOTE(review): value[threadIdx.x] is filled from mat[i+j*d.stride] without
// an i < d.cols guard, so this presumably assumes blockDim.x <= d.cols
// (and <= CU1DBLOCK) -- confirm against the launch configuration.
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
// Only a single block along x may scan each row, with one row per y-thread.
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real value[CU1DBLOCK];
__shared__ int32_cuda index[CU1DBLOCK];
//copy to shared memory
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value
int32_cuda out_max = _max_id_reduce(value, index);
__syncthreads();
//see if it's bigger value
// Thread 0 alone publishes the result for this row.
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
// Cross-entropy backprop for a softmax output layer: for each row j, record
// the log-posterior of the target class vec_tgt[j] (floored at 1e-20 to
// avoid log(0)) and subtract 1 from that class's network output in place,
// turning mat_net_out into the error signal (y - t).
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  // One worker thread per row: only the i == 0 column of threads acts.
  if (i > 0 || j >= d.rows) return;
  int32_cuda tgt_idx = vec_tgt[j] + j * d.stride;
  Real post = mat_net_out[tgt_idx];
  if (post < 1e-20) post = 1e-20;
  vec_log_post[j] = log(post);
  mat_net_out[tgt_idx] -= 1.0;
}
// Partial softmax step: Y = exp(X - X[:, vec_ids]) elementwise, i.e. each
// element is shifted by a per-row reference element (typically the row max,
// for numerical stability) before exponentiation. Normalization is not done
// here.
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real shifted = X[col + row * d.stride] - X[vec_ids[row] + row * d.stride];
  Y[col + row * d.stride] = exp(shifted);
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
// Set every element of an int32 matrix to `value` (forwards to _set_const).
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
/*
* "float"
*/
/*
* CuMatrix
*/
// ---- Single-precision CuMatrix wrappers ----
// Thin host-side shims: each forwards its arguments to the matching
// __global__ kernel via hipLaunchKernelGGL with grid Gr, block Bl, zero
// dynamic shared memory, on the default stream. Mixed-precision variants
// (cudaFD_*) convert double inputs to float via the templated kernels.
void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); }
void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); }
void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim,
const float *vec, const float *mat2, int mat2_row_stride,
int mat2_col_stride, float beta) {
hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaF_copy_col_from_vec(int Gr, int Bl, float* mat, const float* v, int col, MatrixDim d) {
hipLaunchKernelGGL(( _copy_col_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat,v,col,d);
}
void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d);
}
void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) {
hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_scale_diag(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _scale_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x,
MatrixDim d, int src_stride, int group_size) {
hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1,
const float *x2, MatrixDim d, int src_stride,
int group_size, float power) {
hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
// Dispatches on A_trans: adds either src or src^T (scaled by alpha) to dst.
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
} else {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
}
}
void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d);
}
void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim,
float *S, MatrixDim sdim) {
hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
// ---- Single-precision CuVector / block-matrix wrappers ----
// Host-side shims forwarding to the templated kernels. The vec_min/vec_max
// reductions launch a single block of CU1DBLOCK threads; the trace kernels
// fix their grid size via the second template argument.
void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) {
hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed);
}
void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) {
hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim);
}
void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) {
hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) {
hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
void cudaF_vec_min(const float* v, float* value, int dim) {
hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaF_vec_max(const float* v, float* value, int dim) {
hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) {
hipLaunchKernelGGL(( _trace_mat_mat_trans<float,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) {
hipLaunchKernelGGL(( _trace_mat_mat<float,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M,
int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride,
int N_col_stride, int threads_per_element, float beta) {
hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) {
hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc);
}
void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) {
hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v, pvec_sum, dim, size);
}
void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int s) {
hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, s);
}
void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) {
hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) {
hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) {
hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim);
}
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
// Dispatches on B_trans: adds A*B or A*B^T for a block-diagonal B.
void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata,
int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data, int B_num_blocks,
float alpha, float beta, int B_trans) {
if (B_trans) {
hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data,
B_num_blocks, alpha, beta);
} else {
hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data,
B_num_blocks, alpha, beta);
}
}
void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks,
const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride,
const float *D_data, int D_row_stride, int D_col_stride,
float alpha, float beta) {
hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride,
D_col_stride, alpha, beta);
}
void cudaF_block_conv_mat(dim3 Gr, dim3 Bl, float *C, int C_row_stride, int C_block_row_stride,
int C_block_num_rows, int C_block_num_cols,
const float *A, int block_dim_x, int A_num_rows, int A_block_num_rows,
int A_block_num_cols, const float *B, int block_dim_y,
int B_block_num_rows, int B_block_num_cols) {
hipLaunchKernelGGL(( _block_conv_mat), dim3(Gr), dim3(Bl), 0, 0, C, C_row_stride, C_block_row_stride,
C_block_num_rows, C_block_num_cols,
A, block_dim_x, A_num_rows, A_block_num_rows, A_block_num_cols,
B, block_dim_y, B_block_num_rows, B_block_num_cols);
}
/*
* cu::
*/
// ---- Single-precision cu:: (neural-net nonlinearity / utility) wrappers ----
// Activations, their derivatives, softmax variants, splicing/copying helpers
// and lookup/masking utilities; each forwards to its templated kernel.
void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) {
hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power);
}
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, src_stride);
}
void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaF_relu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_relu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_one(int Gr, int Bl, float* x, int dim) {
hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim);
}
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) {
hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) {
hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_copy_col_from_mat(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices);
}
void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
float *output) {
hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output);
}
void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1,
const float *mat2, float *mask, MatrixDim mat1_dim,
int mat2_stride, int mask_stride) {
hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride);
}
/*
* "double"
*/
/*
* CuMatrix
*/
// ---- Double-precision CuMatrix wrappers ----
// Mirror of the cudaF_* CuMatrix section above, instantiating the same
// templated kernels for double; cudaDF_* variants read float inputs.
void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); }
void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); }
void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim,
const double *vec, const double *mat2, int mat2_row_stride,
int mat2_col_stride, double beta) {
hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_copy_col_from_vec(int Gr, int Bl, double* mat, const double* v, int col, MatrixDim d) {
hipLaunchKernelGGL(( _copy_col_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat,v,col,d);
}
void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d);
}
void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) {
hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_scale_diag(int Gr, int Bl, double* mat, double value, int dim) {
hipLaunchKernelGGL(( _scale_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size) {
hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1,
const double* x2, MatrixDim d, int src_stride,
int group_size, double power) {
hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
// Dispatches on A_trans: adds either src or src^T (scaled by alpha) to dst.
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
} else {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
}
}
void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d);
}
void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim,
double *S, MatrixDim sdim) {
hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
// Double-precision host wrappers for CuVector kernels. Purely forwarding;
// the 1-D launches take int grid/block sizes, the reduction-style launches
// (_vec_min/_vec_max/_trace_mat_mat*) use a fixed grid with CU1DBLOCK threads.
void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) {
hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed);
}
void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) {
hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim);
}
void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) {
hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) {
hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) {
hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
// Single-block reduction launches (one block of CU1DBLOCK threads).
void cudaD_vec_min(const double* v, double* value, int dim) {
hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaD_vec_max(const double* v, double* value, int dim) {
hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
// The integer template argument is the number of blocks used by the
// reduction, and must match the grid size passed in the launch below.
void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) {
hipLaunchKernelGGL(( _trace_mat_mat_trans<double,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value);
}
void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) {
hipLaunchKernelGGL(( _trace_mat_mat<double,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value);
}
void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M,
int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride,
int N_col_stride, int threads_per_element, double beta) {
hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) {
hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaD_copy_col_from_mat(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc);
}
void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) {
hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v,pvec_sum,dim,size);
}
void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int s) {
hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, s);
}
void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) {
hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
// Double-precision host wrappers for block-matrix kernels.
// cudaD_add_mat_blockmat dispatches on B_trans between the transposed and
// non-transposed kernel variants; the others are plain forwarders.
void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata,
int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data, int B_num_blocks,
double alpha, double beta, int B_trans) {
if (B_trans) {
hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data,
B_num_blocks, alpha, beta);
} else {
hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data,
B_num_blocks, alpha, beta);
}
}
void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks,
const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride,
const double *D_data, int D_row_stride, int D_col_stride,
double alpha, double beta) {
hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride,
D_col_stride, alpha, beta);
}
void cudaD_block_conv_mat(dim3 Gr, dim3 Bl, double *C, int C_row_stride, int C_block_row_stride,
int C_block_num_rows, int C_block_num_cols,
const double *A, int block_dim_x, int A_num_rows, int A_block_num_rows,
int A_block_num_cols, const double *B, int block_dim_y,
int B_block_num_rows, int B_block_num_cols) {
hipLaunchKernelGGL(( _block_conv_mat), dim3(Gr), dim3(Bl), 0, 0, C, C_row_stride, C_block_row_stride,
C_block_num_rows, C_block_num_cols,
A, block_dim_x, A_num_rows, A_block_num_rows, A_block_num_cols,
B, block_dim_y, B_block_num_rows, B_block_num_cols);
}
/*
* cu::
*/
// Double-precision host wrappers for neural-net activation, softmax and
// miscellaneous kernels. All are plain forwarders to the __global__ kernels.
void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride, int group_size, double power) {
hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power);
}
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, src_stride);
}
void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaD_relu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_diff_relu (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
// Note: the softmax launches take size_t grid/block sizes (1-D launch,
// one block per row of the matrix).
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaD_one(int Gr, int Bl, double* x, int dim) {
hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim);
}
void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) {
hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) {
hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) {
hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) {
hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) {
hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices);
}
void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
double *output) {
hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output);
}
void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1,
const double *mat2, double *mask, MatrixDim mat1_dim,
int mat2_stride, int mask_stride) {
hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride);
}
/* Some conversion kernels for which it's more convenient to not name them F or D. */
// Precision-conversion copy wrappers (suffix encodes dest/src element types:
// d = double, f = float; "_trans" copies the transpose). All instantiate the
// same templated _copy_from_mat / _copy_from_mat_trans kernels.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
| 32b2f4b93631a0c50ad4794d9211424040b5e549.cu | // cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013 Johns Hopkins University (author: Guoguo Chen)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
// Block-wide tree reduction (sum) over 'buffer', which holds one value per
// thread (blockDim.x values). Every thread of the block must call this
// together (it contains __syncthreads()). On return buffer[0] holds the
// total; the rest of the buffer is clobbered.
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// Threads of the upper half fold their value into the lower half;
// the inner bounds test handles an odd nTotalThreads.
if (threadIdx.x >= halfPoint) { // was <
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x < nTotalThreads) { // was +halfPoint
temp = buffer[threadIdx.x]; // was +halfPoint
}
buffer[threadIdx.x - halfPoint] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
// Block-wide tree reduction (minimum) over 'buffer' (blockDim.x values).
// Must be called by every thread of the block; result is left in buffer[0].
template<typename Real>
__device__
static Real _min_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (min)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active
if (threadIdx.x < halfPoint) {
if (threadIdx.x + halfPoint < nTotalThreads) {
Real temp = buffer[threadIdx.x + halfPoint];
if (temp < buffer[threadIdx.x])
buffer[threadIdx.x] = temp;
}
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two
}
// the result
return buffer[0];
}
// Block-wide tree reduction (maximum) over 'buffer' (blockDim.x values).
// Must be called by every thread of the block; result is left in buffer[0].
template<typename Real>
__global__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
if(threadIdx.x+halfPoint < nTotalThreads) {
Real temp = buffer[threadIdx.x + halfPoint];
if (temp > buffer[threadIdx.x])
buffer[threadIdx.x] = temp;
}
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
// Block-wide argmax: 'val' holds the values, 'idx' holds one candidate index
// per thread; on return idx[0] is the index of the maximum value.
// NOTE(review): uses -1e20 as the "missing" sentinel for odd-sized halves,
// which assumes all real values are larger than -1e20 — confirm for callers.
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
// Mirrors the strictly lower triangle of square matrix A into its upper
// triangle: for every element (r, c) with r > c, sets A(c, r) = A(r, c).
// One thread per matrix element; threads on or above the diagonal (or out
// of range) do nothing, so each mirrored pair is written exactly once.
template<typename Real>
__global__
static void _copy_low_upp(Real* A, MatrixDim dimA) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;  // row of the source element
  int c = blockIdx.y * blockDim.y + threadIdx.y;  // column of the source element
  if (r < dimA.rows && c < r) {
    A[c * dimA.stride + r] = A[r * dimA.stride + c];
  }
}
// Mirrors the strictly upper triangle of square matrix A into its lower
// triangle: for every element (r, c) with c > r, sets A(c, r) = A(r, c).
// One thread per matrix element; threads on or below the diagonal (or out
// of range) do nothing, so each mirrored pair is written exactly once.
template<typename Real>
__global__
static void _copy_upp_low(Real* A, MatrixDim dimA) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;  // row of the source element
  int c = blockIdx.y * blockDim.y + threadIdx.y;  // column of the source element
  if (c < dimA.rows && r < c) {
    A[c * dimA.stride + r] = A[r * dimA.stride + c];
  }
}
// mat += diag(vec) * mat2.
// mat = alpha * diag(vec) * mat2 + beta * mat, element-wise:
// mat(i,j) = alpha * vec[i] * mat2(i,j) + beta * mat(i,j).
template<typename Real>
__global__
static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *vec, const Real *mat2, int mat2_row_stride,
int mat2_col_stride, Real beta) {
// Note from Dan: in this kernel, we make the x dimension correspond to the
// row index and y to the column index. That was not always the case for
// earlier kernels written by others.
int i = blockIdx.y * blockDim.y + threadIdx.y; // row index
int j = blockIdx.x * blockDim.x + threadIdx.x; // column index
int index = i * mat_dim.stride + j,
index2 = i * mat2_row_stride + j * mat2_col_stride;
if (i < mat_dim.rows && j < mat_dim.cols) {
mat[index] = alpha * vec[i] * mat2[index2] + beta * mat[index];
}
}
// Expands a lower-triangular packed matrix B into full (strided) matrix A;
// elements above the diagonal are zeroed. i*(i+1)/2 + j is the packed
// offset of element (i, j), j <= i.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dmat.rows && j < dmat.cols) {
int32_cuda index_B = (i * (i+1) / 2) + j;
int32_cuda index_A = i * dmat.stride + j;
if (j <= i) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
// Expands the transpose of a lower-triangular packed matrix B into full
// matrix A; elements below the diagonal are zeroed.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
// transpose the indices used to index the source TpMatrix.
if (i < dmat.rows && j < dmat.cols) {
int32_cuda index_B = (j * (j+1) / 2) + i;
int32_cuda index_A = i * dmat.stride + j;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
// for this kernel, following the newer pattern, the x-dim is the row-index, the
// y-dim is the col-index.
// Element-wise copy with type conversion from mat_in to mat_out.
// Both matrices must have the same rows/cols; strides (and element types)
// may differ. x indexes the row, y the column (the newer convention).
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index.
int32_cuda index_out = j + i * d_out.stride;
int32_cuda index_in = j + i * d_in.stride;
if (i < d_out.rows && j < d_out.cols)
mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}
// Transposing copy with type conversion: mat_out(i, j) = mat_in(j, i).
// For this kernel, the x-dim is the row-index at the output, the y-dim is
// the col-index at the output.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out
int32_cuda index_out = j + i * d_out.stride;
int32_cuda index_in = i + j * d_in.stride;
if (i < d_out.rows && j < d_out.cols)
mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}
// Transposes a square matrix in place. Each thread strictly below the
// diagonal swaps element (r, c) with its mirror (c, r); threads on or
// above the diagonal (or out of range) do nothing, so every off-diagonal
// pair is exchanged exactly once.
template<typename Real>
__global__
static void _transpose_matrix(Real* mat, MatrixDim d) {
  int32_cuda r = blockIdx.x * blockDim.x + threadIdx.x;  // row index
  int32_cuda c = blockIdx.y * blockDim.y + threadIdx.y;  // column index
  if (r < d.rows && c < r) {
    int32_cuda lower = c + r * d.stride;  // element (r, c)
    int32_cuda upper = r + c * d.stride;  // element (c, r)
    Real tmp = mat[lower];
    mat[lower] = mat[upper];
    mat[upper] = tmp;
  }
}
// Copies vector v into column 'col' of 'mat' (one thread per row).
template<typename Real>
__global__
static void _copy_col_from_vec(Real* mat, const Real* v, int col, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < d.rows ) {
int32_cuda index = col + i * d.stride;
mat[index] = v[i];
}
}
// Applies exp() to every element of the matrix in place.
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if ( i < d.cols && j < d.rows ) {
mat[index] = exp(mat[index]);
}
}
// Scales the diagonal of a lower-triangular packed matrix by 'value';
// ((i+1)*(i+2)/2) - 1 is the packed-storage offset of diagonal element i.
template<typename Real>
__global__
static void _scale_diag(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i+1)*(i+2)/2) - 1;
if ( i < dim ) {
mat[index] = value * mat[index];
}
}
// Sets every diagonal element mat(i, i) of a strided matrix to 'value'.
// One thread per diagonal entry; threads with i >= d.rows do nothing.
// Fix: the original wrote the constant 1 and silently ignored the 'value'
// argument, so callers could only ever obtain a unit diagonal.
template<typename Real>
__global__
static void _set_diag(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = i + i*d.stride;
if ( i < d.rows ) {
mat[index] = value;
}
}
// Sets every diagonal element of a lower-triangular packed matrix to
// 'value'; ((i+1)*(i+2)/2) - 1 is the packed offset of diagonal element i.
template<typename Real>
__global__
static void _set_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i+1)*(i+2)/2) - 1;
if ( i < dim ) {
mat[index] = value;
}
}
// Adds 'value' to every diagonal element of a lower-triangular packed matrix.
template<typename Real>
__global__
static void _add_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i+1)*(i+2)/2) - 1;
if ( i < dim ) {
mat[index] = mat[index] + value;
}
}
// Fills every element of the matrix with the scalar 'value'.
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < d.cols && row < d.rows)
    mat[col + row * d.stride] = value;
}
// Zeroes the strictly upper-triangular part of the matrix, i.e. every
// element whose row index j is smaller than its column index i.
// Fix: also bound j by d.rows; the original only checked j < i, so for a
// matrix with more columns than rows a thread could write past the last
// row. Behavior is unchanged for square (the usual) inputs.
template<typename Real>
__global__
static void _set_zero_above_diag(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows && j < i)
mat[index] = 0.0;
}
// Adds the scalar 'value' to every element of the matrix in place.
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < d.cols && row < d.rows) {
    int32_cuda idx = col + row * d.stride;
    mat[idx] += value;
  }
}
// Multiplies every element of the matrix by the scalar 'value' in place.
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] * value;
}
// Applies log() to every element in place. Non-positive entries yield
// -inf/NaN, so callers are expected to ensure positivity.
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows)
mat[index] = log(mat[index]);
}
// Element-wise product in place: mat(i,j) *= A(i,j). The source may have
// a different stride from the destination.
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] * A[src_index];
}
// Element-wise maximum in place: mat(i,j) = max(mat(i,j), A(i,j)).
template<typename Real>
__global__
static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride;
if ( i < dst_d.cols && j < dst_d.rows ) {
Real a = mat[dst_index], b = A[src_index];
mat[dst_index] = (a > b ? a : b);
}
}
// Element-wise vector product in place: v[i] *= a[i].
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
v[i] = v[i] * a[i];
}
// Scales each column i of 'mat' by scale[i].
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[i];
}
// Scales each row j of 'mat' by scale[j].
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[j];
}
// Multiplies each element of y by the element of x belonging to its column
// group: y(j, i) *= x(j, i / group_size). x has d.cols / group_size columns.
template<typename Real>
__global__
static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d,
int src_stride, int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols ) {
int dst_index = i + j * d.stride;
int src_index = i / group_size + j * src_stride;
y[dst_index] *= x[src_index];
}
}
/// y is the derivative we will output; vec is the input we're computing
/// the group p-norm on, "norm" is the previously computed group p-norm.
/// Computes d|v|_p/dv element-wise: sign(v) * |v|^(p-1) * norm^(1-p),
/// or 0 where the group norm is zero (derivative undefined at the origin).
template<typename Real>
__global__
static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm,
MatrixDim d, int src_stride, int group_size, Real power) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols ) {
int dst_index = i + j * d.stride,
src_index = i / group_size + j * src_stride;
Real vec_element = vec[dst_index], // this is the element of the original vector.
norm_element = norm[src_index]; // this is the pnorm
Real vec_element_sign = (vec_element > 0 ? 1 : -1);
Real ans;
if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin.
else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) *
pow(norm_element, 1 - power);
deriv[dst_index] = ans;
}
}
/// Replaces every occurrence of 'orig' in the vector with 'changed';
/// all other elements are left untouched (y = (x == orig ? changed : x)).
template<typename Real>
__global__
static void _replace_value(Real *vec, int dim, Real orig, Real changed) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < dim && vec[idx] == orig)
    vec[idx] = changed;
}
// Divides each row j of 'mat' by vec_div[j]. The reciprocal of each row's
// divisor is computed once per block row (by the threadIdx.x == 0 thread)
// and cached in shared memory, so the division happens once per block row
// rather than once per element.
// NOTE: assumes blockDim.y <= 16 (the size of the 'inv' buffer).
// Fix: the original returned early for j >= d.rows *before* __syncthreads(),
// so threads of a partially out-of-range block skipped the barrier — that is
// undefined behavior (CUDA requires all threads of a block to reach
// __syncthreads()). The guard is now applied around the memory accesses
// while every thread of the block still reaches the barrier.
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
// invert the divider in shared memory, one value per block row
__shared__ Real inv[16];
if (threadIdx.x == 0 && j < d.rows) {
inv[threadIdx.y] = 1.0/vec_div[j];
}
__syncthreads();
// multiply elements by the cached reciprocal
if (i < d.cols && j < d.rows)
mat[index] *= inv[threadIdx.y];
}
// dst += alpha * src, element-wise; src may have a different stride.
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
int32_cuda index_src = i + j*src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha*src[index_src] + dst[index];
}
// dst += alpha * src^T: reads src with swapped row/column indices.
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j *d.stride;
int32_cuda index_src = j + i*src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha*src[index_src] + dst[index];
}
// dst = A * B / C element-wise, except dst = A where C is exactly zero
// (avoids dividing by zero). All four matrices share the same dimensions.
template<typename Real>
__global__
static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows)
if (C[index] == 0)
dst[index] = A[index];
else
dst[index] = A[index] * B[index] / C[index];
}
// Given a matrix input S (not packed!) and a lower-triangular matrix L,
// this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion.
// The i index is the row of the destination S and the j the column (although of
// course the output is symmetric so it doesn't matter in a sense). The main point
// of this is to make use of various symmetries and zero-ness.
// S = beta * S + alpha * T^T T, where T is lower-triangular (see the comment
// block above). One thread per lower-triangle element (i, j), j <= i; it
// computes the dot-product of columns i and j of T (nonzero only for
// row index k >= max(i, j) = i) and writes both symmetric outputs.
// Fix: for diagonal elements (i == j) the two output indices coincide, so
// the original's second store re-read the freshly updated value and applied
// alpha and beta a second time; the mirror store is now skipped on the
// diagonal.
template<typename Real>
__global__
static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S,
MatrixDim sdim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= sdim.rows || j > i) return;
// this thread computes the dot-product of the i'th column of
// L with the j'th column of L. The values we're multiplying
// are only nonzero for row-index k greater or equal to
// max(i, j), which equals i.
Real sum = 0.0;
for (int k = i; k < sdim.rows; k++) {
int i_index = i + tdim.stride * k,
j_index = j + tdim.stride * k;
sum += T[i_index] * T[j_index];
}
int output_index1 = i * sdim.stride + j,
output_index2 = j * sdim.stride + i;
S[output_index1] = alpha * sum + beta * S[output_index1];
if (i != j) // on the diagonal both indices coincide; write only once.
S[output_index2] = alpha * sum + beta * S[output_index2];
}
// dst(j, i) = alpha * col[j] + beta * dst(j, i): adds a (scaled) column
// vector to every column of dst.
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha*col[j] + beta*dst[index];
}
// dst(j, i) = alpha * row[i] + beta * dst(j, i): adds a (scaled) row
// vector to every row of dst.
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha*row[i] + beta*dst[index];
}
// Zeroes elements of 'mat' wherever the corresponding mask element is 0.
// NOTE(review): assumes the mask has the same rows/cols as 'mat' (only the
// strides may differ) — mask indices are not bounds-checked separately.
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*dmat.stride;
int32_cuda index2 = i + j*dmask.stride;
if ( i < dmat.cols && j < dmat.rows )
if(mask[index2] == 0) mat[index] = 0;
}
/*
 * CuVector
 */
// very limited application!
// Rescales v[i] toward a target activity ratio.  With ratio = a[i]/param_3:
// a ratio outside [0, 1.01) sets *flag = 1 (an error flag the caller
// checks) and leaves v[i] alone; a ratio below param_1 divides v[i] by a
// correction factor, a ratio above param_1 multiplies by one; in both
// cases the factor is capped at param_2.
template<typename Real>
__global__
static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if ( i < dim ) {
    Real ratio = a[i] / param_3;
    if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) {
      *flag = 1;  // out-of-range input; signal the host and give up.
      return;
    }
    if ( ratio < param_1 ) {
      // Shrink v[i]: factor = min(param_1 / ratio, param_2).
      Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio);
      v[i] = v[i] / factor;
    } else if ( ratio > param_1 ) {
      // Grow v[i]: factor = min(ratio / param_1, param_2).
      Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1);
      v[i] = v[i] * factor;
    }
  }
}
// v_out[i] = (double) v_in[i]: vector copy with a cast to double.
template<typename Real>
__global__
static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim)
    return;
  v_out[idx] = static_cast<double>(v_in[idx]);
}
// Writes a copy of the vector v_in to every row of the matrix m_out; the
// dimension of v_in should equal the number of columns of m_out.  In this
// kernel, following the newer pattern, x corresponds to the row index and
// y to the column index.
template<typename Real>
__global__
static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= d.rows || col >= d.cols)
    return;
  m_out[row * d.stride + col] = v_in[col];
}
// v_out[i] = (float) v_in[i]: vector copy with a cast to float.
template<typename Real>
__global__
static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim)
    return;
  v_out[idx] = static_cast<float>(v_in[idx]);
}
// Computes the minimum element of v into *value.  Expects to be launched
// with a single block of CU1DBLOCK threads: each thread scans one
// contiguous chunk of v, then the per-thread minima are reduced in shared
// memory by _min_reduce.
template<typename Real>
__global__
static void _vec_min(const Real* v, Real* value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): this early return precedes the barrier(s) inside
  // _min_reduce; it is only safe when blockDim.x <= CU1DBLOCK — confirm
  // against the host-side launch configuration.
  if(i >= CU1DBLOCK) return;
  __shared__ Real row_data[CU1DBLOCK];
  // Each thread covers block_size consecutive elements of v.
  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;
  Real min = 1.0 / 0.0; // infinity.
  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
    Real v_j = v[j];
    if (v_j < min) min = v_j;
  }
  row_data[i] = min;
  __syncthreads();
  //get the minimum (every thread writes the same reduced value)
  *value = _min_reduce(row_data);
}
// Computes the maximum element of v into *value.  Expects to be launched
// with a single block of CU1DBLOCK threads: each thread scans one
// contiguous chunk of v, then the per-thread maxima are reduced in shared
// memory by _max_reduce.
template<typename Real>
__global__
static void _vec_max(const Real* v, Real* value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(blockIdx.y > 0) return;
  __shared__ Real row_data[CU1DBLOCK];
  // NOTE(review): this early return precedes the barrier(s) inside
  // _max_reduce; it is only safe when blockDim.x <= CU1DBLOCK — confirm
  // against the host-side launch configuration.
  if(i >= CU1DBLOCK) return;
  // Each thread covers block_size consecutive elements of v.
  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;
  Real max = -1.0 / 0.0; // -infinity.
  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
    Real v_j = v[j];
    if (v_j > max) max = v_j;
  }
  row_data[i] = max;
  __syncthreads();
  //get the maximum (every thread writes the same reduced value)
  *value = _max_reduce(row_data);
}
// _trace_mat_mat expects to be called with num_blocks blocks, each of
// dimension CU1DBLOCK.  Each block outputs a partial sum of trace(A B) to
// value[blockIdx.x], i.e. value[0 through num_blocks - 1]; the caller
// combines these partial sums.
template<typename Real, int num_blocks>
__global__
static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // Bounds guard.  This used to read "blockIdx.x > num_blocks ||
  // threadIdx.x > CU1DBLOCK", an off-by-one that would admit
  // blockIdx.x == num_blocks (writing one element past the end of value[]
  // below); ">=" is the intended comparison.
  if (blockIdx.x >= num_blocks || threadIdx.x >= CU1DBLOCK) return;
  int num_elements = dA.rows * dA.cols,
      num_threads = CU1DBLOCK * num_blocks;
  // Each thread sums a contiguous chunk of the element pairs.
  int block_size = (num_elements + num_threads - 1) / num_threads;
  int loop_start = i * block_size, loop_end = (i + 1) * block_size;
  if (loop_end > num_elements)
    loop_end = num_elements;
  Real sum = 0.0;
  for (int j = loop_start; j < loop_end; j++) {
    int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is
    // col-index in A; in B, it's reversed.
    int index_A = col + row * dA.stride,
        index_B = row + col * B_stride;
    sum += A[index_A] * B[index_B];
  }
  __shared__ Real row_data[CU1DBLOCK];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  Real ans = _sum_reduce(row_data);
  if (threadIdx.x == 0)
    value[blockIdx.x] = ans;
}
// _trace_mat_mat_trans expects to be called with num_blocks blocks, each of
// dimension CU1DBLOCK.  Each block outputs a partial sum of trace(A B^T)
// to value[blockIdx.x], i.e. value[0 through num_blocks - 1]; the caller
// combines these partial sums.
template<typename Real, int num_blocks>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // Bounds guard.  This used to read "blockIdx.x > num_blocks ||
  // threadIdx.x > CU1DBLOCK", an off-by-one that would admit
  // blockIdx.x == num_blocks (writing one element past the end of value[]
  // below); ">=" is the intended comparison.
  if (blockIdx.x >= num_blocks || threadIdx.x >= CU1DBLOCK) return;
  int num_elements = dA.rows * dA.cols,
      num_threads = CU1DBLOCK * num_blocks;
  Real sum = 0.0;
  // Strided loop: thread i handles elements i, i + num_threads, ...
  for (int j = i; j < num_elements; j += num_threads) {
    int row = j / dA.cols, col = j % dA.cols; // row-index and col-index in A;
    // B is indexed the same way since this computes trace(A B^T).
    int index_A = col + row * dA.stride,
        index_B = col + row * B_stride;
    sum += A[index_A] * B[index_B];
  }
  __shared__ Real row_data[CU1DBLOCK];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  Real ans = _sum_reduce(row_data);
  if (threadIdx.x == 0)
    value[blockIdx.x] = ans;
}
// Adds diag(M N) to v, where M and N are matrices. We supply row_stride and
// col_stride arguments for M and N, and swapping them allows us to transpose
// those matrices. Note: we imagine row-major indexing here, just like Kaldi
// and CBLAS (but unlike CUBLAS).
// This kernel expects the blockDim to be (CU1DBLOCK, 1) and the
// gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element.
// threads_per_element should be a power of 2.
// Computes v[i] = beta * v[i] + alpha * (i'th diagonal element of M N).
template<typename Real>
__global__
static void _add_diag_mat_mat(
    Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride,
    int M_col_stride, const Real *N, int N_row_stride, int N_col_stride,
    int threads_per_element, Real beta) {
  // we actually assume blockDim.x == CU1DBLOCK here.
  // Each diagonal element of v is processed by "threads_per_element" threads.
  __shared__ Real temp_data[CU1DBLOCK];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int v_idx = i / threads_per_element,   // v_idx is the index into v that we are supposed to
      sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells
                                         // us which block of elements we sum up.
  // NOTE(review): threads returning here skip the __syncthreads() calls
  // below; this is safe only if the out-of-range threads make up whole
  // blocks — confirm against the host-side launch configuration.
  if (v_idx >= v_dim) return;
  // Strided partial dot-product of row v_idx of M with column v_idx of N.
  Real sum = 0.0;
  for (int j = sub_idx; j < M_cols; j += threads_per_element) {
    int M_index = v_idx * M_row_stride + j * M_col_stride,
        N_index = j * N_row_stride + v_idx * N_col_stride;
    sum += M[M_index] * N[N_index];
  }
  temp_data[threadIdx.x] = sum;
  // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data
  // that we want to sum up.
  // The following is a tree-based reduction of the elements of temp_data from
  // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx".
  __syncthreads();
  int num_total_threads = threads_per_element;
  while (num_total_threads > 1) {
    int half_point = ((1 + num_total_threads) >> 1);
    if (sub_idx < half_point) {
      Real temp = 0.0;
      if (sub_idx + half_point < num_total_threads) {
        temp = temp_data[threadIdx.x + half_point];
      }
      temp_data[threadIdx.x] += temp;
    }
    __syncthreads();
    num_total_threads = half_point;
  }
  // The first thread of each group writes the reduced diagonal element.
  if (sub_idx == 0) {
    v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x];
  }
}
// v[i] = alpha * x[i] * y[i] + beta * v[i], element-wise.
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim)
    return;
  v[idx] = alpha * x[idx] * y[idx] + beta * v[idx];
}
// Copies column 'col' of mat (dim entries) into the vector v.
template<typename Real>
__global__
static void _copy_col_from_mat(Real* v, int col, const Real* mat, MatrixDim dmat, int dim) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= dim)
    return;
  v[row] = mat[col + row * dmat.stride];
}
// Copies column 'col' of mat into v, casting each element to double.
template<typename Real>
__global__
static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= dim)
    return;
  v[row] = static_cast<double>(mat[col + row * dmat.stride]);
}
// Copies column 'col' of mat into v, casting each element to float.
template<typename Real>
__global__
static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= dim)
    return;
  v[row] = static_cast<float>(mat[col + row * dmat.stride]);
}
// v[i] = exp(v[i]), element-wise in place.
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim)
    return;
  v[idx] = exp(v[idx]);
}
// v[i] = log(v[i]), element-wise in place.  A negative element sets *flag
// to 1 (an error indicator the caller checks) and is left unchanged.
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim)
    return;
  Real val = v[idx];
  if (val < 0) {
    *flag = 1;
    return;
  }
  v[idx] = log(val);
}
// For a list of s MatrixElements x (each a (row, column, weight) triple),
// accumulates the weighted log-prob objective
//   t[0] = sum_j weight_j * log z(row_j, col_j)
// and the total weight t[1] = sum_j weight_j, and adds weight / z(row, col)
// into z2 at each referenced position (the derivative of the objective
// w.r.t. z).  Expects a single block of CU1DBLOCK threads.
template<typename Real>
__global__
static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) {
  int i = threadIdx.x;
  __shared__ Real tot_objf[CU1DBLOCK];
  __shared__ Real tot_weight[CU1DBLOCK];
  Real tmp_weight_sum = 0;
  Real tmp_tot_objf = 0;
  // Partition the s elements as evenly as possible over CU1DBLOCK threads:
  // the first 'threshold' threads take (size + 1) elements, the rest 'size'.
  int size = s / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1
  int loop_start;
  int loop_end;
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  }
  else {
    loop_start = threshold + i*size;
    loop_end = threshold + (i+1)*size;
  }
  for(int j = loop_start; j< loop_end; j++) {
    int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) );
    int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int));
    Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int));
    tmp_weight_sum += weight;
    Real this_prob = *(z + m * d.stride + label);
    tmp_tot_objf += weight * log(this_prob);
    *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here....
  }
  tot_objf[i] = tmp_tot_objf;
  tot_weight[i] = tmp_weight_sum;
  __syncthreads();
  // Reduce both shared arrays; every thread writes the same reduced values.
  *t = _sum_reduce(tot_objf);
  __syncthreads();
  *(t+1) = _sum_reduce(tot_weight);
  return;
}
// For each MatrixElement (row, column, weight) in x[0..s), does
// data(row, column) += alpha * weight.  Expects a single block of
// CU1DBLOCK threads; the s elements are split across the threads with the
// same even-partitioning scheme as _cuda_comp_obj_deriv.
template<typename Real>
__global__
static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int s) {
  int i = threadIdx.x;
  if (i >= s)
    return;
  // First 'threshold' threads take (size + 1) elements each, the rest 'size'.
  int size = s / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1
  int loop_start;
  int loop_end;
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  }
  else {
    loop_start = threshold + i*size;
    loop_end = threshold + (i+1)*size;
  }
  for(int j = loop_start; j < loop_end; j++) {
    *(data + x[j].row * dim.stride + x[j].column) += alpha * x[j].weight;
  }
}
// For each (row, col) pair in indices, writes data(row, col) into the
// corresponding slot of output.  Each thread owns one matrix element and
// scans the full index list for matches.
template<typename Real>
__global__
static void _matrix_lookup(const Real *data, MatrixDim dim,
                           const Int32Pair *indices,
                           int indices_size, Real *output) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dim.rows || c >= dim.cols)
    return;
  Real val = data[r * dim.stride + c];
  for (int k = 0; k < indices_size; ++k) {
    if (indices[k].first == r && indices[k].second == c)
      output[k] = val;
  }
}
// mask(r, c) = 1.0 where mat1(r, c) == mat2(r, c), else 0.0.
template<typename Real>
__global__
static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) {
  int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= mat1_dim.cols || r >= mat1_dim.rows)
    return;
  bool equal = (mat1[c + r * mat1_dim.stride] == mat2[c + r * mat2_stride]);
  mask[c + r * mask_stride] = equal ? 1.0 : 0.0;
}
// Sums the strided elements v[0], v[inc], ..., v[(dim-1)*inc] into *sum.
// Expects a single block of CU1DBLOCK threads (note: i is just threadIdx.x,
// so additional blocks would all redo the same work).
template<typename Real>
__global__
static void _vec_sum(Real *v, Real *sum, int dim, int inc) {
  int i = threadIdx.x;
  __shared__ Real row_data[CU1DBLOCK];
  if (i >= CU1DBLOCK) return;
  Real tmp_sum = 0;
  // Partition dim elements as evenly as possible: the first 'threshold'
  // threads take (size + 1) elements each, the remainder take 'size'.
  int size = dim / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1
  int loop_start;
  int loop_end;
  if(i < threshold) {
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  }
  else {
    loop_start = threshold + i * size;
    loop_end = threshold + (i+1) * size;
  }
  for(int j = loop_start; j< loop_end; j++) {
    tmp_sum += v[j * inc];
  }
  row_data[threadIdx.x] = tmp_sum;
  __syncthreads();
  // Every thread writes the same reduced value to *sum.
  *sum = _sum_reduce(row_data);
}
// Partial sums of v: each thread sums its own 'size'-element slice, the
// per-thread sums within a block are reduced in shared memory, and the
// block's partial total is written to g[blockIdx.x].  The caller combines
// the per-block partials.
template<typename Real>
__global__
static void _pvec_sum(Real* v, Real* g, int dim, int size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int start = size * i;
  // NOTE(review): threads returning here skip the __syncthreads() below;
  // safe only if out-of-range threads form whole blocks — confirm the
  // launch configuration.
  if (start >= dim) return;
  int end = start + size;
  if (end > dim) end = dim;
  __shared__ Real row_data[CU1DBLOCK];
  Real sum = 0;
  for (int j = start; j < end; j++)
    sum += v[j];
  row_data[threadIdx.x] = sum;
  __syncthreads();
  // Every surviving thread writes the same reduced value.
  g[blockIdx.x] = _sum_reduce(row_data);
}
// Floors each element of v at floor_val; count[i] records whether element
// i was floored (1) or left as-is (0).
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim)
    return;
  bool floored = (v[idx] < floor_val);
  if (floored)
    v[idx] = floor_val;
  count[idx] = floored ? 1 : 0;
}
// Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index.
// this is for no reason, really, I just happened to prefer this
// at the time. [dan]
// mat(i, j) = mat(i, j) ^ power, with fast paths for power == 1.0 and 2.0.
// For power == 0.5, elements failing (x >= 0.0) — i.e. negatives and NaN —
// are left unchanged; the test is written as "!(x >= 0.0)" precisely so
// NaN also takes the early return.
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i * d.stride + j;
  if (i < d.rows && j < d.cols) {
    if (power == 1.0)
      return;
    if (power == 2.0) {
      mat[index] = mat[index] * mat[index];
    } else if (power == 0.5) {
      if (!(mat[index] >= 0.0))
        return;
      mat[index] = sqrt(mat[index]);
    } else {
      mat[index] = pow(mat[index], power);
    }
  }
}
// mat(i, j) = |mat(i, j)| ^ power; when include_sign is true, negative
// inputs keep their sign, i.e. the result is -(|x|^power).  power == 1.0,
// 2.0 and 0.5 are special-cased; power < 0 at zero maps to 0.0 rather than
// infinity.  As in _apply_pow, x is the row index and y the column index.
template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i * d.stride + j;
  if (i < d.rows && j < d.cols) {
    if (include_sign == true && mat[index] < 0) {
      // Fix: the "power == 1.0" case used to be a separate "if", so control
      // fell through into the generic pow() branch and recomputed the value
      // (redundant work, same result); "else if" makes the chain exclusive.
      if (power == 1.0)
        mat[index] = -std::abs(mat[index]);
      else if (power == 2.0) {
        mat[index] = -mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = -sqrt(std::abs(mat[index]));
      } else {
        mat[index] = -pow(std::abs(mat[index]), power);
      }
    } else {
      if (power == 1.0)  // same missing-"else" fix as above.
        mat[index] = std::abs(mat[index]);
      else if (power == 2.0) {
        mat[index] = mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = sqrt(std::abs(mat[index]));
      } else if (power < 0.0 && mat[index] == 0.0) {
        mat[index] = 0.0;  // avoid 0^negative == inf.
      } else {
        mat[index] = pow(std::abs(mat[index]), power);
      }
    }
  }
}
// Heaviside step function in place: mat(r, c) becomes 1.0 where it was
// positive, else 0.0.  Caution: here x indexes rows and y indexes columns
// (the opposite of most of the older kernels in this file).
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= d.rows || c >= d.cols)
    return;
  int idx = r * d.stride + c;
  mat[idx] = (mat[idx] > 0.0 ? 1.0 : 0.0);
}
// Floors each element of mat at floor_val in place.  (The comparison form
// deliberately leaves NaN elements untouched.)
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  int idx = c + r * d.stride;
  if (mat[idx] < floor_val)
    mat[idx] = floor_val;
}
// dst(r, c) = src(r, reorder[c]) for reorder[c] >= 0, else 0.0.
// In this kernel the x dimension corresponds to rows and y to columns,
// as it will be going forward.
template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dst_dim.rows || c >= dst_dim.cols)
    return;
  int src_col = reorder[c];
  dst[r * dst_dim.stride + c] =
      (src_col >= 0) ? src[r * src_stride + src_col] : 0.0;
}
// dst(r, c) = src(reorder[r], c) for reorder[r] >= 0, else 0.
// In this kernel the x dimension corresponds to rows and y to columns,
// as it will be going forward.
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dst_dim.rows || c >= dst_dim.cols)
    return;
  int src_row = reorder[r];
  dst[r * dst_dim.stride + c] =
      (src_row >= 0) ? src[src_row * src_stride + c] : 0;
}
// Caps each element of mat at ceiling_val in place.  (The comparison form
// deliberately leaves NaN elements untouched.)
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  int idx = c + r * d.stride;
  if (mat[idx] > ceiling_val)
    mat[idx] = ceiling_val;
}
// Accumulates, for each column i of mat, the sum over rows j of mat(j, i)
// into vec_sum[i] (summing along the row dimension).
// NOTE(review): row_data[j] is filled for j = threadIdx.x without a bounds
// check against d.rows, so this appears to assume the launch geometry makes
// blockDim.x match the summed extent — confirm against the host-side code.
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
  int i = blockIdx.y * blockDim.y + threadIdx.y; //col
  int j = blockIdx.x * blockDim.x + threadIdx.x; //row
  if(blockIdx.x > 0) return;   // only the first block along x participates
  if(blockDim.y != 1) return;  // requires exactly one thread in y
  __shared__ Real row_data[CU1DBLOCK];
  //copy the input to row_data
  row_data[j] = mat[i+j*d.stride];
  __syncthreads();
  //get the sum
  Real sum = _sum_reduce(row_data);
  __syncthreads();
  //add to previously accumulated sum
  if(threadIdx.x == 0)
    vec_sum[i] += sum;
}
// Accumulates sums of mat along one dimension into vec_sum[j].
// NOTE(review): the in-code comments label i as "row" and j as "col", but
// the indexing mat[i + j*d.stride] treats i as the offset within a row
// (i.e. the column) and j as the row, so the net effect is to add, for
// each j, the sum of mat(j, 0..blockDim.x-1) into vec_sum[j] — confirm
// against the callers.  As in _add_row_sum_mat, row_data[i] is filled
// without a bounds check, so the launch geometry must match the summed
// extent.
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; //row
  int j = blockIdx.y * blockDim.y + threadIdx.y; //col
  if(blockIdx.x > 0) return;   // only the first block along x participates
  if(blockDim.y != 1) return;  // requires exactly one thread in y
  __shared__ Real row_data[CU1DBLOCK];
  //copy the input to row_data
  row_data[i] = mat[i+j*d.stride];
  __syncthreads();
  //get the sum
  Real sum = _sum_reduce(row_data);
  __syncthreads();
  //add to previously accumulated sum
  if(threadIdx.x == 0)
    vec_sum[j] += sum;
}
// data(r, c) = 1.0 / data(r, c), element-wise in place.
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  int idx = c + r * d.stride;
  data[idx] = 1.0 / data[idx];
}
// Matrix-wise, does data = alpha * (A * B^T) + beta * data, where B is a
// block matrix.  (An older comment here stated "alpha * data + beta *
// A * B^T"; the update line below — alpha * sum + beta * data — is
// authoritative.)  Each thread handles one row i of A/"data" and one
// diagonal block j of B.
template<typename Real>
__global__
static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols,
                                    int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data,
                                    int B_num_blocks, Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks) return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];
  // BT means B transposed.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride;
    // this_A_row points to the element A[i][BT_row_start], it's really just
    // part of this row of A.
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}
// Matrix-wise, does data = alpha * (A * B) + beta * data, where B is a
// block matrix (the non-transposed counterpart of _add_mat_blockmat_trans
// above).  Each thread handles one row i of A/"data" and one diagonal
// block j of B.
template<typename Real>
__global__
static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols,
                              int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data,
                              int B_num_blocks, Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks) return;
  const CuBlockMatrixData &block_data = B_cu_data[j];
  int B_row_start = block_data.row_offset,
      B_col_start = block_data.col_offset,
      B_num_rows = block_data.matrix_dim.rows,
      B_num_cols = block_data.matrix_dim.cols,
      B_row_stride = block_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
  for (int k = 0; k < B_num_cols; k++) {
    const Real *this_B_col = B_data + k;
    const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride;
    // this_A_row points to the element A[i][B_row_start], it's really just
    // part of this row of A.
    Real sum = 0.0;
    for (int l = 0; l < B_num_rows; l++) // l indexes rows of B.
      sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + B_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}
// For a block matrix B, does B = alpha * C * D + beta * B.
// the (x,y,z) indices are the block index, then the row
// and column indices within the block. Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code. The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
template<typename Real>
__global__
static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks,
                               const Real *C_data, int C_num_cols,
                               int C_row_stride, int C_col_stride,
                               const Real *D_data,
                               int D_row_stride, int D_col_stride,
                               Real alpha, Real beta) {
  int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B.
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block
  int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block
  if (b >= num_blocks) return;
  const CuBlockMatrixData &block_data = B_cu_data[b];
  if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
    return; // we're outside the dimensions of the b'th block.
  // B_elem is the element of B we're writing to.
  Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) +
      i * block_data.matrix_dim.stride + j;
  Real B_val = *B_elem;
  // B_row and B_col are the (row, col) index into the full matrix B.
  int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j;
  const Real *C_row_data = C_data + C_row_stride * B_row,
      *D_col_data = D_data + D_col_stride * B_col;
  // Dot-product of row B_row of C with column B_col of D.
  Real sum = 0.0;
  for (int k = 0; k < C_num_cols; k++) {
    sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
  }
  *B_elem = alpha * sum + beta * B_val;
}
// Does data = alpha * (A * B^T) + beta * data, where B is a block matrix.
// NOTE(review): this body is byte-for-byte the same computation as
// _add_mat_blockmat_trans earlier in this file — presumably kept as a
// separate entry point for historical reasons; consider consolidating.
template<typename Real>
__global__
static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols,
                                         int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data,
                                         int B_num_blocks, Real alpha, Real beta) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
  if (i >= A_num_rows || j >= B_num_blocks) return;
  const CuBlockMatrixData &cu_data = B_cu_data[j];
  // BT means B transposed.
  int BT_row_start = cu_data.col_offset,
      BT_col_start = cu_data.row_offset,
      BT_num_rows = cu_data.matrix_dim.cols,
      BT_num_cols = cu_data.matrix_dim.rows,
      BT_col_stride = cu_data.matrix_dim.stride;
  const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void;
  // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
  for (int k = 0; k < BT_num_cols; k++) {
    const Real *this_BT_col = B_data + k * BT_col_stride;
    const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride;
    // this_A_row points to the element A[i][BT_row_start], it's really just
    // part of this row of A.
    Real sum = 0.0;
    for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
      sum += this_BT_col[l] * this_A_row[l * A_col_stride];
    int index = i * dim.stride + (k + BT_col_start);
    data[index] = alpha * sum + beta * data[index];
  }
}
/*
template<typename Real>
__global__
static void _block_conv_mat(Real *C_block, int C_row_stride, int C_num_rows,
const Real *A_block, int A_block_num_rows, int A_block_num_cols,
const Real *B_block, int B_block_num_rows, int B_block_num_cols) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index for A
int j = blockIdx.y * blockDim.y + threadIdx.y; // row_index into block c
int k = blockIdx.z * blockDim.z + threadIdx.z; // col_indx into block c
if (i > C_num_rows) return;
if (j >= (A_block_num_rows - B_block_num_rows + 1) ||
k >= (A_block_num_cols - B_block_num_cols + 1)) return; // we are outside dimension of block B;
Real sum = 0;
int A_index_const = i*(A_block_num_rows * A_block_num_cols)+j*A_block_num_cols+k;
for (int row = 0; row < B_block_num_rows; row++) {
for (int col = 0; col < B_block_num_cols; col++) {
int A_index = A_index_const + row * A_block_num_cols + col;
sum += A_block[A_index] * B_block[row * B_block_num_rows + col];
}
}
C_block[i * C_row_stride + j * (A_block_num_cols-B_block_num_cols+1) + k] = sum;
}
*/
// Block convolution: convolves blocks of A with per-block filters from B,
// writing results into C.
// NOTE(review): several things here look suspect and should be confirmed:
//  (1) every thread in a CUDA block writes the shared 'filter' array with
//      values that depend on its own j (via B_index_const) — unless the
//      launch guarantees all threads of a block share the same j, this is
//      a data race on shared memory;
//  (2) 'filter' is written with stride B_block_num_cols but both the B
//      read and the later filter read use stride B_block_num_rows — these
//      only agree when the filter blocks are square;
//  (3) __mul24 is a 24-bit *integer* multiply intrinsic, applied here to
//      Real operands — presumably unintended.
template<typename Real>
__global__
static void _block_conv_mat(Real *C, int C_row_stride, int C_block_row_stride,
                            int C_block_num_rows, int C_block_num_cols,
                            const Real *A, int block_dim_x, int A_num_rows,
                            int A_block_num_rows, int A_block_num_cols, const Real *B, int block_dim_y,
                            int B_block_num_rows, int B_block_num_cols) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index for A
  int j = blockIdx.y * blockDim.y + threadIdx.y; // block_index into blocks A and B
  int k = blockIdx.z * blockDim.z + threadIdx.z; // indx into each block of c
  if (i > A_num_rows) return;
  if (j >= (block_dim_x*block_dim_y) ||
      (k >= C_block_row_stride)) return; // we are outside dimension of block B;
  int block_num = j / block_dim_y;
  int filter_num = j % block_dim_y;
  int C_block_row_num = k / C_block_num_cols;
  int C_block_col_num = k % C_block_num_cols;
  Real sum = 0;
  int A_index_const = (i*block_dim_x + block_num) * (A_block_num_rows * A_block_num_cols)+
      C_block_row_num * A_block_num_cols + C_block_col_num;
  int B_index_const = j * B_block_num_rows * B_block_num_cols;
  __shared__ Real filter[CU1DBLOCK];//[B_block_num_rows * B_block_num_cols];
  // Stage this thread's filter block into shared memory (see note (1)).
  for (int row = 0; row < B_block_num_rows; row++)
    for (int col = 0; col < B_block_num_cols; col++)
      filter[row*B_block_num_cols+col] = B[B_index_const + row * B_block_num_rows + col];
  __syncthreads();
  for (int row = 0; row < B_block_num_rows; row++) {
    for (int col = 0; col < B_block_num_cols; col++) {
      int A_index = A_index_const + row * A_block_num_cols + col;
      sum += __mul24(A[A_index], filter[row * B_block_num_rows + col]);
      //sum += __mul24(A[A_index], B[B_index_const + row * B_block_num_rows + col]);
    }
  }
  __syncthreads();
  int C_index = i * C_row_stride + j * C_block_row_stride + k;
  C[C_index] = sum;
}
// data(r, c) = sum of src_data(r, indices[c].first .. indices[c].second-1).
// Since this is a newer kernel, x is the row index and y the column index.
template<typename Real>
__global__
static void _sum_column_ranges(Real *data, MatrixDim dim,
                               const Real *src_data,
                               MatrixDim src_dim,
                               const Int32Pair *indices) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= dim.rows || c >= dim.cols)
    return;
  const Real *src_row = src_data + r * src_dim.stride;
  Real total = 0.0;
  for (int k = indices[c].first; k < indices[c].second; k++)
    total += src_row[k];
  data[r * dim.stride + c] = total;
}
// Soft hinge: y = log(1 + exp(x)), element-wise.  For x >= 10.0 the
// function is approximated by y = x (it approaches the identity for large
// x), which also avoids overflow in exp().
template<typename Real>
__global__
static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  Real val = x[c + r * src_stride];
  y[c + r * d.stride] = (val >= 10.0) ? val : log1p(exp(val));
}
// Group p-norm: y(j, i) = (sum over the i'th group of group_size
// consecutive elements of row j of x of |x|^power) ^ (1/power).
// If the direct computation yields NaN (e.g. the intermediate sum
// overflowed), the group is recomputed with its elements pre-scaled by the
// group's largest absolute value — keeping the intermediate powers in
// range — and the result scaled back.
template<typename Real>
__global__
static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride,
                         int group_size, Real power) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j < d.rows && i < d.cols) {
    int dst_index = i + j * d.stride;
    Real tmp = 0;
    int src_begin_index = i * group_size + j * src_stride;
    int src_end_index = src_begin_index + group_size;
    // Direct computation: sum |x|^power over the group.
    for (int src_index = src_begin_index; src_index < src_end_index;
         src_index ++) {
      tmp += pow(std::abs(x[src_index]), power);
    }
    tmp = pow(tmp, Real(1.0 / power));
    if (!isnan(tmp)) {
      y[dst_index] = tmp;
    } else {
      // Fallback: find the largest absolute value in the group.
      Real max_value = x[src_begin_index], min_value = max_value;
      for (int src_index = src_begin_index + 1;
           src_index < src_end_index; src_index ++) {
        if (x[src_index] > max_value)
          max_value = x[src_index];
        if (x[src_index] < min_value)
          min_value = x[src_index];
      }
      tmp = 0.0;
      Real max_abs_value = (max_value > -min_value ?
                            max_value : -min_value); // let max_value be the
                                                     // largest abs(value)
      if (max_abs_value == 0) {
        y[dst_index] = 0.0;
      } else {
        // Recompute with elements scaled into [-1, 1], then scale back.
        for (int src_index = src_begin_index;
             src_index < src_end_index; src_index ++) {
          Real x_scaled = x[src_index] / max_abs_value;
          tmp += pow(std::abs(x_scaled), Real(power));
        }
        y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
      }
    }
  }
}
/*
 * cu::
 */
// Logistic sigmoid: y = 1 / (1 + exp(-x)), element-wise.
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  y[c + r * d.stride] = 1.0 / (1.0 + exp(-x[c + r * src_stride]));
}
// Backprop through sigmoid: eout = y * (1 - y) * e, where y is the sigmoid
// output and e the incoming error derivative.
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int src_stride) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  int src = c + r * src_stride;
  eout[c + r * d.stride] = y[src] * (1.0 - y[src]) * e[src];
}
// Hyperbolic tangent via y = (exp(2x) - 1) / (exp(2x) + 1), saturating to
// 1.0 when exp(2x) overflows to infinity.
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  Real exp_2x = exp(2.0 * x[c + r * src_stride]);
  y[c + r * d.stride] =
      isinf(exp_2x) ? 1.0 : (exp_2x - 1.0) / (exp_2x + 1.0);
}
// Rectified linear unit: y = max(x, 0), element-wise.
template<typename Real>
__global__
static void _relu(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  Real val = x[c + r * src_stride];
  y[c + r * d.stride] = (val > 0) ? val : 0;
}
// Backprop through tanh: eout = (1 - y^2) * e, where y is the tanh output
// and e the incoming error derivative.
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
  int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (c >= d.cols || r >= d.rows)
    return;
  int idx = c + r * d.stride;
  eout[idx] = (1.0 - y[idx] * y[idx]) * e[idx];
}
// Backprop through ReLU: pass the error e through where the forward
// output y was positive, zero elsewhere. All three matrices share the
// layout described by 'd'.
template<typename Real>
__global__
static void _diff_relu(Real*eout, const Real*e, const Real*y, MatrixDim d) {
  const int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  const int32_cuda idx = col + row * d.stride;
  eout[idx] = (y[idx] > 0) ? e[idx] : 0;
}
// Row-wise softmax with ONE thread per row (no intra-row parallelism):
// y(j,:) = exp(x(j,:) - max_i x(j,i)) / sum_i exp(x(j,i) - max).
// The max/sum accumulators are double regardless of Real, for accuracy.
// See _softmax_reduce below for the block-parallel variant.
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
  int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
  if(j >= d.rows) return;
  //copy to output and find max...
  double max = -1e20;
  double sum = 0.0;
  for(int32_cuda i=0; i<d.cols; i++) {
    if(max < x[i+j*d.stride]) max = x[i+j*d.stride];
    y[i+j*d.stride] = x[i+j*d.stride];
  }
  //subtract max, apply exp, sum up...
  for(int32_cuda i=0; i<d.cols; i++) {
    y[i+j*d.stride] = exp(y[i+j*d.stride] - max);
    sum += y[i+j*d.stride];
  }
  //normalize by sum...
  for(int32_cuda i=0; i<d.cols; i++) {
    y[i+j*d.stride] /= sum;
  }
}
// Block-parallel row-wise softmax: one thread block per row (blockIdx.x
// selects the row), with the threads of the block cooperating via two
// tree reductions in shared memory -- first a max-reduce for numerical
// stability, then a sum-reduce for the normalizer. Each thread handles
// the strided subset of columns {threadIdx.x, threadIdx.x+THREADS, ...}.
// NOTE(review): the initial loads at x[threadIdx.x + j*d.stride] are not
// guarded by threadIdx.x < d.cols, so when blockDim.x > d.cols threads
// read (and in the exp stage write) the padding between d.cols and
// d.stride -- presumably callers size the block so this is benign;
// confirm at the launch sites.
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int j = blockIdx.x;
  int THREADS = blockDim.x;
  if (j >= d.rows) return;
  __shared__ Real aux[CU1DBLOCK];
  int steps = (d.cols - 1) / THREADS + 1;  // columns handled per thread (ceil-div)
  //copy input to aux
  aux[threadIdx.x] = x[threadIdx.x+j*d.stride];
  for(int i=1; i<steps; ++i) {
    if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride])
      aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride];
  }
  //get the maximum value
  int nTotalThreads = THREADS;
  __syncthreads();
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1);  // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint])
        aux[threadIdx.x] = aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);  // divide by two.
  }
  Real max = aux[0];
  __syncthreads();
  // subtract max, apply exp, sum up...
  y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max);
  aux[threadIdx.x] = y[threadIdx.x+j*d.stride];
  for(int i=1; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max);
      aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride];
    }
  }
  nTotalThreads = THREADS;
  __syncthreads();
  // sum-reduce the per-thread partial sums in aux[]
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1);  // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads)
        aux[threadIdx.x] += aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1);  // divide by two.
  }
  Real sum = aux[0];
  __syncthreads();
  //normalize by sum...
  for(int i=0; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum;
    }
  }
}
// Frame splicing: each output row j concatenates several input rows taken
// at relative offsets off[k]. Output column i maps to input column
// i % d_in.cols of the row j + off[i / d_in.cols]; source row indices are
// clamped to [0, d_in.rows-1] so edge frames are repeated.
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d_out.stride;
  if (i < d_out.cols && j < d_out.rows ) {
    int32_cuda src_col = i % d_in.cols;
    int32_cuda src_row = j + off[i / d_in.cols];
    // clamp at the matrix edges
    if(src_row < 0) src_row = 0;
    if(src_row >= d_in.rows) src_row = d_in.rows-1;
    y[index] = x[src_col + src_row*d_in.stride];
  }
}
// Symmetrize x and pack it into triangular (SpMatrix) storage:
// for i <= j, y[j*(j+1)/2 + i] = 0.5 * (x(j,i) + x(i,j)).
// NOTE(review): only j < d_in.rows is checked; index2 uses i as a row,
// so this presumably assumes a square input -- confirm at call sites.
template<typename Real>
__global__
static void _take_mean(const Real* x, Real* y, MatrixDim d_in) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index1 = i + j * d_in.stride;   // element (j, i)
  int32_cuda index2 = j + i * d_in.stride;   // element (i, j)
  if (i <= j && j < d_in.rows) {
    int32_cuda index_sp = (j * (j+1) / 2) + i;  // packed lower-triangular slot
    y[index_sp] = 0.5 * (x[index1] + x[index2]);
  }
}
// Copy the lower triangle (col <= row) of x into packed triangular
// storage: y[row*(row+1)/2 + col] = x(row, col).
template<typename Real>
__global__
static void _take_lower(const Real* x, Real* y, MatrixDim d_in) {
  const int row = blockIdx.x * blockDim.x + threadIdx.x;
  const int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < d_in.rows && col <= row) {
    y[(row * (row + 1) / 2) + col] = x[row * d_in.stride + col];
  }
}
// Copy the upper triangle (col >= row) of x into packed triangular
// storage, transposing on the way: y[col*(col+1)/2 + row] = x(row, col).
template<typename Real>
__global__
static void _take_upper(const Real* x, Real* y, MatrixDim d_in) {
  const int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= row && col < d_in.rows) {
    y[(col * (col + 1) / 2) + row] = x[row * d_in.stride + col];
  }
}
// Extract the diagonal of a packed triangular matrix into a vector:
// the i-th diagonal element lives at packed position (i+1)(i+2)/2 - 1.
template<typename Real>
__global__
static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) {
  const int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim) return;
  y[i] = x[((i + 1) * (i + 2) / 2) - 1];
}
// Expand a packed symmetric matrix x into a full square matrix y:
// y(i,j) = x[packed index of (max(i,j), min(i,j))], i.e. the stored
// lower triangle is mirrored into the upper triangle.
template<typename Real>
__global__
static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // row
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // column
  if (i < dim.rows && j < dim.cols) {
    int dst_index = i * dim.stride + j, src_index;
    if (j <= i) { // no transpose
      src_index = (i * (i+1) / 2) + j;
    } else { // transpose.
      src_index = (j * (j+1) / 2) + i;
    }
    y[dst_index] = x[src_index];
  }
}
// Column reordering: y(:, i) = x(:, copy_from[i]). A source index outside
// [0, d_in.cols) stores +inf (1.0/0.0) into the destination -- a deliberate
// poison value that marks invalid entries.
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d_out.stride;
  if (i < d_out.cols && j < d_out.rows ) {
    int32_cuda src_col = copy_from[i];
    if(src_col >= 0 && src_col < d_in.cols) {
      y[index] = x[src_col + j*d_in.stride];
    } else {
      // intentional +inf marker for out-of-range source columns
      y[index] = 1.0/0.0;
    }
  }
}
// Fill a vector of length 'dim' with ones.
template<typename Real>
__global__
static void _one(Real* x, int dim) {
  const int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim) return;
  x[i] = 1.0;
}
// Row reordering (used for shuffling training frames):
// y(j, :) = x(copy_from[j], :). The source row indices are not
// bounds-checked here; callers must supply valid indices.
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d_out.stride;
  if (i < d_out.cols && j < d_out.rows ) {
    int32_cuda src_row = copy_from[j];
    y[index] = x[i + src_row*d_in.stride];
  }
}
// L1 regularization with weight clipping: shrinks each nonzero weight
// toward zero by l1 (with the sign of the weight). If the simulated SGD
// update (weight - lr*grad - l1_signed) would cross zero, the weight and
// its gradient are clamped to exactly zero instead, keeping the model
// sparse. Modifies both 'wei' and 'grad' in place.
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride;
  if (i < d.cols && j < d.rows) {
    if(wei[index]==0.0) return; //skip L1 if zero weight!
    Real l1_signed = l1;
    if(wei[index] < 0.0) //flip sign
      l1_signed = -l1;
    Real before = wei[index];
    Real after = wei[index] -lr*grad[index] -l1_signed;//simulate update
    // '^' on the comparisons is a sign-change test: true iff exactly one
    // of before/after is positive.
    if((after > 0.0) ^ (before > 0.0)) { //sign changed?
      wei[index] = 0.0;
      grad[index] = 0.0;
    } else {
      wei[index] -= l1_signed;
    }
  }
}
// For each row j, find the maximum element and its column index via a
// shared-memory reduction (_max_id_reduce), then update the running
// (vec_val[j], vec_id[j]) pair if this block's maximum is at least as
// large. 'voff' is added to the stored column index, which supports
// scanning a wide matrix in column blocks. Requires a single block in
// the x-direction and blockDim.y == 1 (enforced by the early returns).
// NOTE(review): value[threadIdx.x] is loaded without checking
// i < d.cols -- presumably the launch configuration matches blockDim.x
// to the number of columns being scanned; confirm at call sites.
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  if(blockIdx.x > 0) return;
  if(blockDim.y != 1) return;
  __shared__ Real value[CU1DBLOCK];
  __shared__ int32_cuda index[CU1DBLOCK];
  //copy to shared memory
  value[threadIdx.x] = mat[i+j*d.stride];
  index[threadIdx.x] = threadIdx.x;
  __syncthreads();
  //get the id of the max value
  int32_cuda out_max = _max_id_reduce(value, index);
  __syncthreads();
  //see if it's bigger value
  if(threadIdx.x == 0) {
    if(vec_val[j] <= mat[out_max+j*d.stride]) {
      vec_val[j] = mat[out_max+j*d.stride];
      vec_id[j] = voff+out_max;
    }
  }
}
// Cross-entropy derivative w.r.t. softmax outputs, computed in place:
// for each row j, store log(net_out[j, tgt[j]]) (floored at 1e-20 to
// avoid log(0)) into vec_log_post[j], then subtract 1.0 at the target
// column so mat_net_out becomes the gradient (y - t) for one-hot targets.
// Only threads with i == 0 do work; one thread handles one row.
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  if(i>0) return;
  if(j<d.rows) {
    int32_cuda index = vec_tgt[j] + j*d.stride;
    Real value = mat_net_out[index];
    if(value < 1e-20) value = 1e-20;
    vec_log_post[j] = log(value);
    mat_net_out[index] -= 1.0;
  }
}
// Partial softmax step: Y(j, i) = exp(X(j, i) - X(j, vec_ids[j])), i.e.
// each row is shifted by the element at a per-row pivot column before
// exponentiation. Normalization is left to the caller.
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
  const int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < d.cols && row < d.rows) {
    const Real pivot = X[vec_ids[row] + row * d.stride];
    Y[col + row * d.stride] = exp(X[col + row * d.stride] - pivot);
  }
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
// Set every element of an int32 matrix to 'value'. Gr/Bl are the
// grid/block dimensions forwarded to the kernel launch.
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
  _set_const<<<Gr,Bl>>>(mat,value,d);
}
/*
* "float"
*/
/*
* CuMatrix
*/
// --- float CuMatrix wrappers ---------------------------------------------
// Thin extern-"C"-style wrappers that launch the templated kernels above
// for float data. Each forwards its arguments unchanged; Gr/Bl are the
// grid/block dimensions chosen by the caller.
void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); }
void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); }
void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim,
                            const float *vec, const float *mat2, int mat2_row_stride,
                            int mat2_col_stride, float beta) {
  _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
                               mat2_col_stride, beta);
}
void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) {
  _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) {
  _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) {
  _copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) {
  _copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_copy_col_from_vec(int Gr, int Bl, float* mat, const float* v, int col, MatrixDim d) {
  _copy_col_from_vec<<<Gr,Bl>>>(mat,v,col,d);
}
void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _transpose_matrix<<<Gr,Bl>>>(mat, d);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
  _apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) {
  _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d);
}
void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _apply_heaviside<<<Gr,Bl>>>(mat, d);
}
void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) {
  _apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) {
  _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) {
  _set_diag<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
  _set_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
  _add_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  _set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _set_zero_above_diag<<<Gr,Bl>>>(mat, d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  _add<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_scale_diag(int Gr, int Bl, float* mat, float value, int dim) {
  _scale_diag<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  _scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  _apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) {
  _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) {
  _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
  _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
  _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x,
                              MatrixDim d, int src_stride, int group_size) {
  _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size);
}
void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1,
                            const float *x2, MatrixDim d, int src_stride,
                            int group_size, float power) {
  _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
  _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) {
  if (A_trans) {
    _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
  } else {
    _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
  }
}
void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d) {
  _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d);
}
void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim,
                      float *S, MatrixDim sdim) {
  _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
  _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
  _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
  _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
// --- float CuVector / reduction / block-matrix wrappers -------------------
// Same pattern as above: forward arguments straight into a kernel launch.
// The reduction wrappers (vec_min/vec_max/trace_*) use fixed launch
// configurations tied to CU1DBLOCK.
void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) {
  _replace_value<<<Gr,Bl>>>(v, dim, orig, changed);
}
void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) {
  _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim);
}
void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) {
  _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) {
  _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
  _vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaF_vec_min(const float* v, float* value, int dim) {
  _vec_min<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaF_vec_max(const float* v, float* value, int dim) {
  _vec_max<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) {
  _trace_mat_mat_trans<float,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value);
}
void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) {
  _trace_mat_mat<float,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value);
}
void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M,
                            int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride,
                            int N_col_stride, int threads_per_element, float beta) {
  _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
                               N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) {
  _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
  _vec_sum<<<Gr,Bl>>>(v, value, dim, inc);
}
void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) {
  _pvec_sum<<<Gr,Bl>>>(v, pvec_sum, dim, size);
}
void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int s) {
  _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, s);
}
void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) {
  _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t);
}
void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) {
  _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t);
}
void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) {
  _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim);
}
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) {
  _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
  _vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
  _vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
  _add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
  _add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
  _invert_elements<<<Gr,Bl>>>(data, d);
}
void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata,
                            int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride,
                            const CuBlockMatrixData *B_cu_data, int B_num_blocks,
                            float alpha, float beta, int B_trans) {
  if (B_trans) {
    _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
                                       A_row_stride, A_col_stride, B_cu_data,
                                       B_num_blocks, alpha, beta);
  } else {
    _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
                                 A_row_stride, A_col_stride, B_cu_data,
                                 B_num_blocks, alpha, beta);
  }
}
void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks,
                             const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride,
                             const float *D_data, int D_row_stride, int D_col_stride,
                             float alpha, float beta) {
  _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols,
                                C_row_stride, C_col_stride, D_data, D_row_stride,
                                D_col_stride, alpha, beta);
}
void cudaF_block_conv_mat(dim3 Gr, dim3 Bl, float *C, int C_row_stride, int C_block_row_stride,
                          int C_block_num_rows, int C_block_num_cols,
                          const float *A, int block_dim_x, int A_num_rows, int A_block_num_rows,
                          int A_block_num_cols, const float *B, int block_dim_y,
                          int B_block_num_rows, int B_block_num_cols) {
  _block_conv_mat<<<Gr, Bl>>>(C, C_row_stride, C_block_row_stride,
                              C_block_num_rows, C_block_num_cols,
                              A, block_dim_x, A_num_rows, A_block_num_rows, A_block_num_cols,
                              B, block_dim_y, B_block_num_rows, B_block_num_cols);
}
/*
* cu::
*/
// --- float activation / nnet-op wrappers ----------------------------------
// Launch wrappers for the activation kernels (sigmoid, tanh, relu, softmax
// and their derivatives) and assorted copy/lookup operations.
void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) {
  _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power);
}
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int src_stride) {
  _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, src_stride);
}
void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  _tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
  _diff_tanh<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaF_relu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  _relu<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_relu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
  _diff_relu<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
  _softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
  _softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaF_one(int Gr, int Bl, float* x, int dim) {
  _one<<<Gr,Bl>>>(x,dim);
}
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
  _take_mean<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
  _take_lower<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) {
  _take_upper<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) {
  _copy_from_sp<<<Gr,Bl>>>(x, y, dim);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
  _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
  _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
  _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) {
  _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in);
}
void cudaF_copy_col_from_mat(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) {
  _copy_col_from_mat<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) {
  _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) {
  _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
                             const float *src_data, MatrixDim src_dim,
                             const Int32Pair *indices) {
  _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices);
}
void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim,
                         const Int32Pair *indices, int indices_size,
                         float *output) {
  _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output);
}
void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1,
                              const float *mat2, float *mask, MatrixDim mat1_dim,
                              int mat2_stride, int mask_stride) {
  _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride);
}
/*
* "double"
*/
/*
* CuMatrix
*/
// --- double CuMatrix wrappers ---------------------------------------------
// Double-precision counterparts of the cudaF_* matrix wrappers above;
// each forwards its arguments unchanged into a kernel launch.
void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); }
void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); }
void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim,
                            const double *vec, const double *mat2, int mat2_row_stride,
                            int mat2_col_stride, double beta) {
  _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
                               mat2_col_stride, beta);
}
void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) {
  _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) {
  _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) {
  _copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) {
  _copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaD_copy_col_from_vec(int Gr, int Bl, double* mat, const double* v, int col, MatrixDim d) {
  _copy_col_from_vec<<<Gr,Bl>>>(mat,v,col,d);
}
void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  _transpose_matrix<<<Gr,Bl>>>(mat, d);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  _apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
  _apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) {
  _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d);
}
void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  _apply_heaviside<<<Gr,Bl>>>(mat, d);
}
void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) {
  _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) {
  _apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) {
  _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) {
  _set_diag<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
  _set_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
  _add_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  _set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  _set_zero_above_diag<<<Gr,Bl>>>(mat, d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  _add<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_scale_diag(int Gr, int Bl, double* mat, double value, int dim) {
  _scale_diag<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  _scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  _apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
  _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
  _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
  _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
  _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x,
                              MatrixDim d, int src_stride, int group_size) {
  _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size);
}
void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1,
                            const double* x2, MatrixDim d, int src_stride,
                            int group_size, double power) {
  _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
  _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) {
  if (A_trans) {
    _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
  } else {
    _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
  }
}
void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d) {
  _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d);
}
void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim,
                      double *S, MatrixDim sdim) {
  _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
  _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
  _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
  _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) {
_replace_value<<<Gr,Bl>>>(v, dim, orig, changed);
}
void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) {
_set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim);
}
void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) {
_copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) {
_copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) {
_vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaD_vec_min(const double* v, double* value, int dim) {
_vec_min<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaD_vec_max(const double* v, double* value, int dim) {
_vec_max<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) {
_trace_mat_mat_trans<double,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value);
}
void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) {
_trace_mat_mat<double,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value);
}
void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M,
int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride,
int N_col_stride, int threads_per_element, double beta) {
_add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) {
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaD_copy_col_from_mat(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
_vec_sum<<<Gr,Bl>>>(v,value,dim,inc);
}
void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) {
_pvec_sum<<<Gr,Bl>>>(v,pvec_sum,dim,size);
}
void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int s) {
_cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, s);
}
void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) {
_vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) {
_vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
_vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
_vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
// Host wrappers for matrix reductions, element-wise ops and block-matrix
// products (double precision).  Pure launch forwarders, no error checking.
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
  _add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
  _add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
  _invert_elements<<<Gr,Bl>>>(data, d);
}
// Dispatches to the transposed or non-transposed kernel depending on the
// B_trans flag (treated as a boolean).
void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata,
                            int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride,
                            const CuBlockMatrixData *B_cu_data, int B_num_blocks,
                            double alpha, double beta, int B_trans) {
  if (B_trans) {
    _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
                                       A_row_stride, A_col_stride, B_cu_data,
                                       B_num_blocks, alpha, beta);
  } else {
    _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
                                 A_row_stride, A_col_stride, B_cu_data,
                                 B_num_blocks, alpha, beta);
  }
}
void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks,
                             const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride,
                             const double *D_data, int D_row_stride, int D_col_stride,
                             double alpha, double beta) {
  _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols,
                                C_row_stride, C_col_stride, D_data, D_row_stride,
                                D_col_stride, alpha, beta);
}
void cudaD_block_conv_mat(dim3 Gr, dim3 Bl, double *C, int C_row_stride, int C_block_row_stride,
                          int C_block_num_rows, int C_block_num_cols,
                          const double *A, int block_dim_x, int A_num_rows, int A_block_num_rows,
                          int A_block_num_cols, const double *B, int block_dim_y,
                          int B_block_num_rows, int B_block_num_cols) {
  _block_conv_mat<<<Gr, Bl>>>(C, C_row_stride, C_block_row_stride,
                              C_block_num_rows, C_block_num_cols,
                              A, block_dim_x, A_num_rows, A_block_num_rows, A_block_num_cols,
                              B, block_dim_y, B_block_num_rows, B_block_num_cols);
}
/*
 * cu::
 */
// Host wrappers for nonlinearities, softmax variants and misc utility
// kernels (double precision).  src_stride allows the source matrix to have
// a different row stride than the destination described by MatrixDim d.
void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
                       int src_stride, int group_size, double power) {
  _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power);
}
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int src_stride) {
  _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, src_stride);
}
void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  _tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
  _diff_tanh<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaD_relu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  _relu<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_diff_relu (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
  _diff_relu<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
  _softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
  _softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaD_one(int Gr, int Bl, double* x, int dim) {
  _one<<<Gr,Bl>>>(x,dim);
}
void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) {
  _take_mean<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) {
  _take_lower<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) {
  _take_upper<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) {
  _copy_from_sp<<<Gr,Bl>>>(x,y,d_out);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
  _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
  _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
  _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) {
  _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in);
}
void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
                             const double *src_data, MatrixDim src_dim,
                             const Int32Pair *indices) {
  _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices);
}
void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim,
                         const Int32Pair *indices, int indices_size,
                         double *output) {
  _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output);
}
void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1,
                              const double *mat2, double *mask, MatrixDim mat1_dim,
                              int mat2_stride, int mask_stride) {
  _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride);
}
/* Some conversion kernels for which it's more convenient to not name them F or D. */
// Matrix copy/convert wrappers: the suffix encodes destination/source element
// precision (d = double, f = float); the _trans variants call the transposing
// kernel.  All instantiate the same templated kernels.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
|
bd273f4d7ee62fb3f5dedff8430c22b72a99cc97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DEVICE_CODE
#include "systemDefines.h"
#include "InteractionEngine_interface.h"
#include "NonBondedInteraction.h"
#include "BondInteraction.h"
#include "AngleInteraction.h"
// #include "CellList_interface.h"
#include "Auxiliary.h"
// Texture references through which the interaction kernels read particle
// coordinates and types; bound to the system arrays in
// InteractionEngine::init and unbound in the destructor.
texture<CoordType, 1, hipReadModeElementType> global_texRef_interaction_coord;
texture<TypeType , 1, hipReadModeElementType> global_texRef_interaction_type;
// Constant-memory tables for non-bonded interactions: per-interaction type
// tag, a flattened parameter pool, and the offset of each interaction's
// parameters inside that pool.
__constant__
InteractionType nonBondedInteractionType [MaxNumberNonBondedInteraction];
__constant__
ScalorType nonBondedInteractionParameter [MaxNumberNonBondedInteractionParameter];
__constant__
IndexType nonBondedInteractionParameterPosition [MaxNumberNonBondedInteraction];
// Same layout for bonded (bond/angle) interactions.
__constant__
InteractionType bondedInteractionType [MaxNumberBondedInteraction];
__constant__
IndexType bondedInteractionParameterPosition [MaxNumberBondedInteraction];
__constant__
ScalorType bondedInteractionParameter [MaxNumberBondedInteractionParamemter];
// Lookup table mapping atom-type pairs to a non-bonded interaction index,
// together with its length and the number of atom types.
__constant__
IndexType const_nonBondedInteractionTableLength[1];
__constant__
IndexType const_numAtomType[1];
__constant__
IndexType const_nonBondedInteractionTable [MaxLengthNonBondedInteractionTable];
// Prepare the engine for system `sys`:
//  - choose a 1-D launch geometry with NTread threads per block and enough
//    blocks to cover all atoms (ceil-div),
//  - bind the coordinate/type arrays to the texture references above,
//  - allocate per-atom (non-bonded) and per-block (bonded/angle) summation
//    buffers used when statistics are requested,
//  - create the 8 streams used to overlap the statistic summations.
// Exclusion-list state is reset; it is configured later in
// registNonBondedInteraction.
void InteractionEngine::init (const MDSystem & sys,
			      const IndexType & NTread)
{
  hasBond = false;
  hasAngle = false;
  myBlockDim.y = 1;
  myBlockDim.z = 1;
  myBlockDim.x = NTread;
  IndexType nob;
  // ceil(numAtom / blockDim.x) so every atom gets a thread
  if (sys.ddata.numAtom % myBlockDim.x == 0){
    nob = sys.ddata.numAtom / myBlockDim.x;
  } else {
    nob = sys.ddata.numAtom / myBlockDim.x + 1;
  }
  atomGridDim = toGridDim (nob);
  // size_t sizetype = sizeof(TypeType)*sys.ddata.numMem;
  // legacy texture-reference API; sized over numMem (capacity), not numAtom
  hipBindTexture(0, global_texRef_interaction_coord, sys.ddata.coord,
		 sizeof(CoordType) * sys.ddata.numMem);
  hipBindTexture(0, global_texRef_interaction_type, sys.ddata.type,
		 sizeof(TypeType) * sys.ddata.numMem);
  checkCUDAError ("InteractionEngine::init, bind texture");
  // init sum vectors
  // non-bonded sums are accumulated per atom, bonded/angle sums per block
  sum_nb_p.reinit (sys.ddata.numAtom, NThreadForSum);
  sum_nb_vxx.reinit (sys.ddata.numAtom, NThreadForSum);
  sum_nb_vyy.reinit (sys.ddata.numAtom, NThreadForSum);
  sum_nb_vzz.reinit (sys.ddata.numAtom, NThreadForSum);
  sum_b_p.reinit (nob, NThreadForSum);
  sum_b_vxx.reinit (nob, NThreadForSum);
  sum_b_vyy.reinit (nob, NThreadForSum);
  sum_b_vzz.reinit (nob, NThreadForSum);
  sum_angle_p.reinit (nob, NThreadForSum);
  sum_angle_vxx.reinit (nob, NThreadForSum);
  sum_angle_vyy.reinit (nob, NThreadForSum);
  sum_angle_vzz.reinit (nob, NThreadForSum);
  for (IndexType i = 0; i < 8; ++i){
    hipStreamCreate(&sum_stream[i]);
  }
  checkCUDAError ("InteractionEngine::init init sum statistic");
  // exclusion list
  maxNumExclusion = 0;
  sharedExclusionList = false;
  exclusion_sbuffSize = size_t(0);
}
// Round x up to the next multiple of 4; returns x unchanged when it is
// already a multiple of 4.  Used to pad shared-memory element counts.
static IndexType hroundUp4 (const IndexType x)
{
  // BUGFIX: the original condition was `x & 3 == 0`, which parses as
  // `x & (3 == 0)` i.e. `x & 0`, so it was always false and values that
  // were already aligned got rounded up one extra step (e.g. 4 -> 8).
  if ((x & 3) == 0){
    return x;
  }
  else {
    return ((x >> 2) + 1) << 2;
  }
}
// Upload the non-bonded interaction description of `sysNbInter` to the
// constant-memory tables declared above, store the tail corrections, and
// configure the exclusion-list shared-memory buffer.
// Throws MDExcptUnbuiltNonBondedInteraction when the description was never
// built, and MDExcptExceedConstantMemLimit when a table would overflow its
// constant-memory capacity.
void InteractionEngine::
registNonBondedInteraction (const SystemNonBondedInteraction & sysNbInter)
{
  if (! sysNbInter.beBuilt()) {
    throw MDExcptUnbuiltNonBondedInteraction ("InteractionEngine");
  }
  // BUGFIX: the original compared against MaxNumberBondedInteraction here,
  // i.e. the bonded limit, while both the table being filled and the
  // exception message refer to the non-bonded limit.
  if (sysNbInter.numberOfInteraction() > MaxNumberNonBondedInteraction ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registNonBondedInteraction",
	"nonBonedInteractionType",
	MaxNumberNonBondedInteraction * sizeof(InteractionType));
  }
  if (sysNbInter.numberOfParameter() > MaxNumberNonBondedInteractionParameter ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registNonBondedInteraction",
	"nonBondedInteractionParameter",
	MaxNumberNonBondedInteractionParameter * sizeof(ScalorType));
  }
  hipMemcpyToSymbol (nonBondedInteractionType,
		     sysNbInter.interactionType(),
		     sizeof(InteractionType) * sysNbInter.numberOfInteraction());
  // BUGFIX: the element sizes of the next two copies were swapped relative
  // to the declared types of the destination symbols
  // (parameter positions are IndexType, parameters are ScalorType).
  hipMemcpyToSymbol (nonBondedInteractionParameterPosition,
		     sysNbInter.interactionParameterPosition(),
		     sizeof(IndexType) * sysNbInter.numberOfInteraction());
  hipMemcpyToSymbol (nonBondedInteractionParameter,
		     sysNbInter.interactionParameter(),
		     sizeof(ScalorType) * sysNbInter.numberOfParameter());
  checkCUDAError ("InteractionEngine::init, init NB force setting");

  // upload the (atom type, atom type) -> interaction index lookup table
  IndexType tableSize = sysNbInter.interactionTableSize();
  IndexType tmpNumAtomType = sysNbInter.numberOfAtomTypes();
  if (tableSize > MaxLengthNonBondedInteractionTable){
    throw MDExcptExceedConstantMemLimit(
	"InteractionEngine::registNonBondedInteraction",
	"nonBondedInteractionTable",
	MaxLengthNonBondedInteractionTable * sizeof (ScalorType));
  }
  hipMemcpyToSymbol (const_nonBondedInteractionTableLength,
		     &tableSize,
		     sizeof (IndexType));
  checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTableLength");
  hipMemcpyToSymbol (const_numAtomType,
		     &tmpNumAtomType,
		     sizeof (IndexType));
  checkCUDAError ("InteractionEngine::init, const_numAtomType");
  hipMemcpyToSymbol (const_nonBondedInteractionTable,
		     sysNbInter.interactionTable(),
		     sizeof (IndexType) * tableSize);
  checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTable");

  // long-range (tail) corrections, applied per unit volume later on
  energyCorr = sysNbInter.energyCorrection ();
  pressureCorr = sysNbInter.pressureCorrection ();

  // use a shared-memory copy of the exclusion list only when it fits
  maxNumExclusion = sysNbInter.maxNumberOfExclusion();
  if (maxNumExclusion != 0){
    sharedExclusionList = true;
    exclusion_sbuffSize = myBlockDim.x * maxNumExclusion * sizeof(IndexType);
    if (exclusion_sbuffSize > SystemSharedBuffSize){
      sharedExclusionList = false;
    }
  }
}
// Upload the bonded (bond/angle) interaction description to the
// constant-memory tables and record which interaction kinds are present.
// Throws MDExcptExceedConstantMemLimit when a table would overflow.
void InteractionEngine::
registBondedInteraction (const SystemBondedInteraction & sysBdInter)
{
  if (sysBdInter.hasBond() ){
    hasBond = true;
  }
  if (sysBdInter.hasAngle()){
    hasAngle = true;
  }
  if (sysBdInter.numberOfInteraction() > MaxNumberBondedInteraction ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registBondedInteraction",
	"bondedInteractionType",
	MaxNumberBondedInteraction * sizeof(InteractionType));
  }
  if (sysBdInter.numberOfParameter() > MaxNumberBondedInteractionParamemter ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registBondedInteraction",
	"bondedInteractionParameter",
	MaxNumberBondedInteractionParamemter * sizeof(ScalorType));
  }
  if (hasBond || hasAngle){
    hipMemcpyToSymbol (bondedInteractionType,
		       sysBdInter.interactionType(),
		       sizeof(InteractionType) * sysBdInter.numberOfInteraction());
    // BUGFIX: the element sizes of the next two copies were swapped relative
    // to the declared types of the destination symbols
    // (parameter positions are IndexType, parameters are ScalorType).
    hipMemcpyToSymbol (bondedInteractionParameterPosition,
		       sysBdInter.interactionParameterPosition(),
		       sizeof(IndexType) * sysBdInter.numberOfInteraction());
    hipMemcpyToSymbol (bondedInteractionParameter,
		       sysBdInter.interactionParameter(),
		       sizeof(ScalorType) * sysBdInter.numberOfParameter());
    checkCUDAError ("InteractionEngine::init, init bond force setting");
    // cal shared buff size
    calBondInteraction_sbuffSize  = myBlockDim.x * sizeof(ScalorType);
    calAngleInteraction_sbuffSize = myBlockDim.x * sizeof(ScalorType);
  }
}
// Release the texture bindings and the 8 summation streams created in init().
InteractionEngine::~InteractionEngine()
{
  hipUnbindTexture(global_texRef_interaction_coord);
  hipUnbindTexture(global_texRef_interaction_type);
  for (IndexType i = 0; i < 8; ++i){
    hipStreamDestroy(sum_stream[i]);
  }
}
// Zero the per-atom force arrays; must run before force terms are
// accumulated for a new step.
void InteractionEngine::clearInteraction (MDSystem & sys)
{
  hipLaunchKernelGGL(( clearForce)
      , dim3(atomGridDim), dim3(myBlockDim), 0, 0,
	  sys.ddata.numAtom,
	  sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz);
  checkCUDAError ("InteractionEngine::clearInteraction");
}
// nblock should be 1 and block size should be 1
// Writes the precomputed long-range energy/pressure tail corrections into
// the statistic array.  Single-thread kernel by design: it stores two
// scalars, so it must be launched as <<<1,1>>> (every extra thread would
// redundantly write the same values).
__global__ void
applyEnergyPressureCorrection (ScalorType * ddata,
			       ScalorType energyCorr,
			       ScalorType pressureCorr)
{
  ddata[mdStatisticEnergyCorrection] = energyCorr;
  ddata[mdStatisticPressureCorrection] = pressureCorr;
}
// Accumulate non-bonded forces from the neighbor list (no statistics).
// Dispatches to one of two kernel overloads depending on whether an
// exclusion list is supplied.
// NOTE(review): the exclusion-list launch does not pass sys.ddata.rcut while
// the plain launch does — presumably a different kernel overload; confirm
// against the kernel declarations.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const NeighborList & nlist,
			   const ExclusionList * excllist,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNonBondedInteraction);
  if (excllist == NULL){
    hipLaunchKernelGGL(( calNonBondedInteraction_neighbor)
	, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.ddata.rcut,
	    sys.box,
	    nlist.dnlist);
  }
  else{
    // exclusion list may live in shared memory (exclusion_sbuffSize bytes)
    hipLaunchKernelGGL(( calNonBondedInteraction_neighbor)
	, dim3(atomGridDim), dim3(myBlockDim),
	exclusion_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    nlist.dnlist,
	    excllist->dexcllist,
	    sharedExclusionList
	    );
  }
  checkCUDAError ("InteractionEngine::applyInteraction nb");
  err.check ("interaction engine nb");
  if (timer != NULL) timer->toc(mdTimeNonBondedInteraction);
}
// void InteractionEngine::
// applyNonBondedInteraction (MDSystem & sys,
// const CellList & clist,
// const ScalorType & rcut,
// NeighborList & nlist,
// MDTimer *timer )
// {
// if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
// size_t applyNonBondedInteraction_CellList_sbuffSize =
// (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
// hroundUp4(clist.getBlockDim().x);
// // sizeof(IndexType) * hroundUp4(myBlockDim.x) +
// // sizeof(CoordType) * hroundUp4(myBlockDim.x) +
// // sizeof(TypeType) * hroundUp4(myBlockDim.x);
// calNonBondedInteraction
// <<<clist.getCellGrimDim(), clist.getBlockDim(),
// applyNonBondedInteraction_CellList_sbuffSize>>> (
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.forcx,
// sys.ddata.forcy,
// sys.ddata.forcz,
// sys.ddata.type,
// sys.box,
// clist.dclist,
// rcut,
// nlist.dnlist,
// err.ptr_de);
// checkCUDAError ("InteractionEngine::applyInteraction nb");
// err.check ("interaction engine nb");
// if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
// }
// Neighbor-list non-bonded forces, additionally accumulating potential
// energy and virial components into statistic `st`, and applying the
// volume-scaled long-range corrections recorded at registration time.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const NeighborList & nlist,
			   MDStatistic & st,
			   const ExclusionList * excllist,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
  if (excllist == NULL){
    hipLaunchKernelGGL(( calNonBondedInteraction_neighbor)
	, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.ddata.rcut,
	    sys.box,
	    nlist.dnlist
	    ,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff
	    );
  }
  else {
    hipLaunchKernelGGL(( calNonBondedInteraction_neighbor)
	, dim3(atomGridDim), dim3(myBlockDim),
	exclusion_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    nlist.dnlist
	    ,
	    excllist->dexcllist,
	    sharedExclusionList,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff
	    );
  }
  checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
  err.check ("interaction engine nb");
  // reduce the per-atom partial sums into the statistic array
  hipDeviceSynchronize();
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  // tail corrections scale with 1/V (energy) and 1/V^2 (pressure)
  ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
  volumei = 1.f / volumei;
  // printf ("apply Ec %f, Pc %f\n",
  // 	  energyCorr * volumei,
  // 	  pressureCorr * volumei * volumei);
  hipLaunchKernelGGL(( applyEnergyPressureCorrection)
      , dim3(1), dim3(1), 0, 0, st.ddata,
				energyCorr * volumei,
				pressureCorr * volumei * volumei);
  hipDeviceSynchronize();
  if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
}
// Cell-list non-bonded forces (no statistics).  Falls back to the all-pair
// overload when the cell list is empty.  Shared memory holds one index,
// coordinate and type per thread of the cell block, each slot count padded
// by hroundUp4.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const CellList & clist,
			   const ScalorType & rcut,
			   MDTimer *timer )
{
  if (!clist.isempty()){
    if (timer != NULL) timer->tic(mdTimeNonBondedInteraction);
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    hipLaunchKernelGGL(( calNonBondedInteraction_cell)
	, dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()),
	applyNonBondedInteraction_CellList_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut,
	    err.ptr_de);
    checkCUDAError ("InteractionEngine::applyInteraction nb");
    err.check ("interaction engine nb");
    if (timer != NULL) timer->toc(mdTimeNonBondedInteraction);
  }
  else {
    applyNonBondedInteraction (sys, rcut, timer);
  }
}
// Cell-list non-bonded forces with statistics (potential + virial), plus
// the volume-scaled tail corrections.  Falls back to the all-pair overload
// when the cell list is empty.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const CellList & clist,
			   const ScalorType & rcut,
			   MDStatistic & st,
			   MDTimer *timer )
{
  if (!clist.isempty()){
    if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    hipLaunchKernelGGL(( calNonBondedInteraction_cell)
	, dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()),
	applyNonBondedInteraction_CellList_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de
	    );
    checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
    err.check ("interaction engine nb");
    hipDeviceSynchronize();
    sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
    sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
    sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
    sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
    // tail corrections scale with 1/V (energy) and 1/V^2 (pressure)
    ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
    volumei = 1.f / volumei;
    // printf ("apply Ec %f, Pc %f\n",
    // 	  energyCorr * volumei,
    // 	  pressureCorr * volumei * volumei);
    hipLaunchKernelGGL(( applyEnergyPressureCorrection)
	, dim3(1), dim3(1), 0, 0, st.ddata,
				  energyCorr * volumei,
				  pressureCorr * volumei * volumei);
    hipDeviceSynchronize();
    if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
  }
  else {
    applyNonBondedInteraction (sys, rcut, st, timer);
  }
}
// All-pair O(N^2) non-bonded forces (no statistics); used when no cell list
// is available.  Shared memory caches one coordinate and type per thread.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const ScalorType & rcut,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNonBondedInteraction);
  size_t applyNonBondedInteraction_AllPair_sbuffSize =
      (sizeof(CoordType) + sizeof(TypeType)) *
      hroundUp4(myBlockDim.x);
  hipLaunchKernelGGL(( calNonBondedInteraction_all)
      , dim3(atomGridDim), dim3(myBlockDim),
      applyNonBondedInteraction_AllPair_sbuffSize, 0,
	  sys.ddata.numAtom,
	  sys.ddata.coord,
	  sys.ddata.forcx,
	  sys.ddata.forcy,
	  sys.ddata.forcz,
	  sys.ddata.type,
	  sys.box,
	  rcut,
	  err.ptr_de);
  checkCUDAError ("InteractionEngine::applyInteraction nb");
  err.check ("interaction engine nb");
  if (timer != NULL) timer->toc(mdTimeNonBondedInteraction);
}
// All-pair non-bonded forces with statistics (potential + virial), plus the
// volume-scaled tail corrections.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const ScalorType & rcut,
			   MDStatistic & st,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
  size_t applyNonBondedInteraction_AllPair_sbuffSize =
      (sizeof(CoordType) + sizeof(TypeType)) *
      hroundUp4(myBlockDim.x);
  hipLaunchKernelGGL(( calNonBondedInteraction_all)
      , dim3(atomGridDim), dim3(myBlockDim),
      applyNonBondedInteraction_AllPair_sbuffSize, 0,
	  sys.ddata.numAtom,
	  sys.ddata.coord,
	  sys.ddata.forcx,
	  sys.ddata.forcy,
	  sys.ddata.forcz,
	  sys.ddata.type,
	  sys.box,
	  rcut,
	  sum_nb_p.buff,
	  sum_nb_vxx.buff,
	  sum_nb_vyy.buff,
	  sum_nb_vzz.buff,
	  err.ptr_de);
  checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
  err.check ("interaction engine nb");
  hipDeviceSynchronize();
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  // tail corrections scale with 1/V (energy) and 1/V^2 (pressure)
  ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
  volumei = 1.f / volumei;
  // printf ("apply Ec %f, Pc %f\n",
  // 	  energyCorr * volumei,
  // 	  pressureCorr * volumei * volumei);
  hipLaunchKernelGGL(( applyEnergyPressureCorrection)
      , dim3(1), dim3(1), 0, 0, st.ddata,
				energyCorr * volumei,
				pressureCorr * volumei * volumei);
  hipDeviceSynchronize();
  if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
}
// void InteractionEngine::
// applyNonBondedInteraction (MDSystem & sys,
// const CellList & clist,
// const ScalorType & rcut,
// NeighborList & nlist,
// MDStatistic & st,
// MDTimer *timer )
// {
// if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
// if (!clist.isempty()){
// size_t applyNonBondedInteraction_CellList_sbuffSize =
// (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
// hroundUp4(clist.getBlockDim().x);
// calNonBondedInteraction
// <<<clist.getCellGrimDim(), clist.getBlockDim(),
// applyNonBondedInteraction_CellList_sbuffSize>>> (
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.forcx,
// sys.ddata.forcy,
// sys.ddata.forcz,
// sys.ddata.type,
// sys.box,
// clist.dclist,
// rcut,
// nlist.dnlist,
// sum_nb_p.buff,
// sum_nb_vxx.buff,
// sum_nb_vyy.buff,
// sum_nb_vzz.buff,
// err.ptr_de
// );
// }
// checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
// err.check ("interaction engine nb");
// hipDeviceSynchronize();
// sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
// sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX, 1);
// sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY, 2);
// sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ, 3);
// ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
// volumei = 1.f / volumei;
// // printf ("apply Ec %f, Pc %f\n",
// // energyCorr * volumei,
// // pressureCorr * volumei * volumei);
// applyEnergyPressureCorrection
// <<<1, 1, 0, 4>>> (st.ddata,
// energyCorr * volumei,
// pressureCorr * volumei * volumei);
// hipDeviceSynchronize();
// if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
// }
// Compute the twin-range correction forces for the shell rcut1 < r <= rcut2
// (presumably — confirm against the kernels) into `twrec`, choosing the
// cell-list kernel when a cell list exists and the all-pair kernel
// otherwise, then record the resulting energy/pressure corrections on the
// host side of `twrec`.
void InteractionEngine::
calTwinRangeCorrection (const MDSystem &		sys,
			const CellList &		clist,
			const ScalorType &		rcut1,
			const ScalorType &		rcut2,
			TwinRangeCorrectionRecorder &	twrec,
			MDTimer *			timer)
{
  if (timer != NULL) timer->tic(mdTimeNBInterTwinRange);
  if (clist.isempty()){
    size_t applyNonBondedInteraction_AllPair_sbuffSize =
	(sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(myBlockDim.x);
    hipLaunchKernelGGL(( calTwinRangeCorrection_all)
	, dim3(atomGridDim), dim3(myBlockDim),
	applyNonBondedInteraction_AllPair_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    rcut1,
	    rcut2,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  else {
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    hipLaunchKernelGGL(( calTwinRangeCorrection_cell)
	, dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()),
	applyNonBondedInteraction_CellList_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut1,
	    rcut2,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  checkCUDAError ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  // reduce partial sums into a temporary statistic, then read it back on the
  // host to fill the recorder's scalar corrections
  hipDeviceSynchronize();
  MDStatistic st (sys);
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  hipDeviceSynchronize();
  st.updateHost ();
  twrec.energyCorrection() = st.nonBondedEnergy();
  twrec.pressureCorrection() = st.pressure(sys.box);
  if (timer != NULL) timer->toc(mdTimeNBInterTwinRange);
}
// Combined pass: build the neighbor list (inner cutoff rcut1) while
// simultaneously computing the twin-range correction forces and statistics
// for the outer shell up to rcut2; cell-list kernel when available,
// all-pair otherwise.  Results mirror calTwinRangeCorrection plus the
// filled nlist.
void InteractionEngine::
buildNeighborListCalTwinRangeCorrection (const MDSystem &		sys,
					 const CellList &		clist,
					 const ScalorType &		rcut1,
					 const ScalorType &		rcut2,
					 NeighborList &			nlist,
					 TwinRangeCorrectionRecorder &	twrec,
					 MDTimer *			timer)
{
  if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
  if (clist.isempty()){
    size_t applyNonBondedInteraction_AllPair_sbuffSize =
	(sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(myBlockDim.x);
    hipLaunchKernelGGL(( buildNeighborListCalTwinRangeCorr_all)
	, dim3(atomGridDim), dim3(myBlockDim),
	applyNonBondedInteraction_AllPair_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    rcut1,
	    rcut2,
	    nlist.dnlist,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  else {
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    hipLaunchKernelGGL(( buildNeighborListCalTwinRangeCorr_cell)
	, dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()),
	applyNonBondedInteraction_CellList_sbuffSize, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut1,
	    rcut2,
	    nlist.dnlist,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  checkCUDAError ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  // reduce partial sums into a temporary statistic and read back the scalar
  // corrections for the recorder
  hipDeviceSynchronize();
  MDStatistic st (sys);
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  hipDeviceSynchronize();
  st.updateHost ();
  twrec.energyCorrection() = st.nonBondedEnergy();
  twrec.pressureCorrection() = st.pressure(sys.box);
  if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
}
// Accumulate bonded forces (no statistics): bond terms and, when present,
// angle terms; each guarded by the flags set in registBondedInteraction.
void InteractionEngine::
applyBondedInteraction (MDSystem &			sys,
			const BondedInteractionList &	bdlist,
			MDTimer *			timer )
{
  if (hasBond) {
    if (timer != NULL) timer->tic(mdTimeBondedInteraction);
    hipLaunchKernelGGL(( calBondInteraction)
	, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,  sys.ddata.forcy,  sys.ddata.forcz,
	    sys.box,
	    bdlist.dbondlist);
    checkCUDAError ("InteractionEngine::applyInteraction bonded");
    err.check ("interaction engine b");
    if (timer != NULL) timer->toc(mdTimeBondedInteraction);
  }
  if (hasAngle){
    if (timer != NULL) timer->tic(mdTimeAngleInteraction);
    hipLaunchKernelGGL(( calAngleInteraction)
	, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz,
	    sys.box,
	    bdlist.danglelist);
    checkCUDAError ("InteractionEngine::applyInteraction angle");
    err.check ("interaction engine angle");
    if (timer != NULL) timer->toc(mdTimeAngleInteraction);
  }
}
// Compute bonded (bond + angle) forces AND accumulate potential-energy /
// diagonal-virial statistics into `st`.
//
// Each interaction class runs in two phases:
//   1. launch the statistic-aware kernel, which writes one partial sum
//      per block into the sum_* buffers (sized by the *_sbuffSize dynamic
//      shared-memory requirement);
//   2. reduce those per-block partials into st.ddata via sumBuffAdd.
// Kernels and reductions run on the default stream, so they are ordered
// with respect to each other.
void InteractionEngine::
applyBondedInteraction (MDSystem & sys,
			const BondedInteractionList & bdlist,
			MDStatistic & st,
			MDTimer *timer)
{
  if (hasBond) {
    if (timer != NULL) timer->tic(mdTimeBInterStatistic);
    // bond forces + per-block energy/virial partials
    hipLaunchKernelGGL(( calBondInteraction)
	, dim3(atomGridDim), dim3(myBlockDim),
	calBondInteraction_sbuffSize, 0,
	sys.ddata.numAtom,
	sys.ddata.coord,
	sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz,
	sys.box,
	bdlist.dbondlist
	,
	sum_b_p.buff,
	sum_b_vxx.buff,
	sum_b_vyy.buff,
	sum_b_vzz.buff,
	err.ptr_de
	);
    checkCUDAError ("InteractionEngine::applyInteraction bonded (with statistic)");
    err.check ("interaction engine");
    if (timer != NULL) timer->toc(mdTimeBInterStatistic);
  }
  if (hasBond) {
    if (timer != NULL) timer->tic(mdTimeBInterStatistic);
    // reduce the per-block bond partials into the statistic record
    hipDeviceSynchronize();
    sum_b_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential);
    sum_b_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
    sum_b_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
    sum_b_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
    hipDeviceSynchronize();
    if (timer != NULL) timer->toc(mdTimeBInterStatistic);
    checkCUDAError ("InteractionEngine::applyInteraction sum bond statistic (with statistic)");
  }
  if (hasAngle){
    if (timer != NULL) timer->tic(mdTimeAngleInterStatistic);
    // angle forces + per-block energy/virial partials
    hipLaunchKernelGGL(( calAngleInteraction)
	, dim3(atomGridDim), dim3(myBlockDim),
	calAngleInteraction_sbuffSize, 0,
	sys.ddata.numAtom,
	sys.ddata.coord,
	sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz,
	sys.box,
	bdlist.danglelist,
	sum_angle_p.buff,
	sum_angle_vxx.buff,
	sum_angle_vyy.buff,
	sum_angle_vzz.buff,
	err.ptr_de);
    checkCUDAError ("InteractionEngine::applyInteraction angle");
    err.check ("interaction engine angle");
    if (timer != NULL) timer->toc(mdTimeAngleInterStatistic);
  }
  if (hasAngle){
    if (timer != NULL) timer->tic(mdTimeAngleInterStatistic);
    // NOTE(review): unlike the bond path above, no hipDeviceSynchronize()
    // precedes these sums; default-stream ordering presumably makes this
    // safe, but the asymmetry is worth confirming (timing sections may be
    // skewed as a result).
    sum_angle_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential);
    sum_angle_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
    sum_angle_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
    sum_angle_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
    hipDeviceSynchronize();
    if (timer != NULL) timer->toc(mdTimeAngleInterStatistic);
    checkCUDAError ("InteractionEngine::applyInteraction sum angle statistic (with statistic)");
  }
}
// void InteractionEngine::
// calculateWidomDeltaEnergy (const MDSystem & sys,
// const NeighborList & nlist,
// WidomTestParticleInsertion_NVT & wtest,
// MDTimer * timer )
// {
// if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
// // printf ("### %d\n", nlist.mode);
// if (nlist.mode == CellListBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_NVT
// <<<toGridDim(wtest.numTestParticle()),
// nlist.myBlockDim.x,
// nlist.myBlockDim.x * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.dclist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// else if (nlist.mode == AllPairBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_allPair_NVT
// <<<toGridDim(wtest.numTestParticle()),
// DefaultNThreadPerBlock,
// DefaultNThreadPerBlock * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.myrlist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
// }
// void InteractionEngine::
// calculateWidomDeltaEnergy (const MDSystem & sys,
// const NeighborList & nlist,
// WidomTestParticleInsertion_NVT2 & wtest,
// MDTimer * timer )
// {
// if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
// // printf ("### %d\n", nlist.mode);
// if (nlist.mode == CellListBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_NVT
// <<<toGridDim(wtest.numTestParticle()),
// nlist.myBlockDim.x,
// nlist.myBlockDim.x * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.dclist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
// }
// void InteractionEngine::
// calculateWidomDeltaEnergy (const MDSystem & sys,
// const NeighborList & nlist,
// WidomTestParticleInsertion_NPT & wtest,
// MDTimer * timer )
// {
// if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
// // printf ("### %d\n", nlist.mode);
// if (nlist.mode == CellListBuilt){
// // printf ("### here %f, n: %d\n", wtest.energyCorrection(), wtest.numTestParticle());
// widomDeltaPoten_NVT
// <<<toGridDim(wtest.numTestParticle()),
// nlist.myBlockDim.x,
// nlist.myBlockDim.x * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.dclist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// else if (nlist.mode == AllPairBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_allPair_NVT
// <<<toGridDim(wtest.numTestParticle()),
// DefaultNThreadPerBlock,
// DefaultNThreadPerBlock * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.myrlist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// // for (unsigned i = 0; i < wtest.numTestParticle(); ++i){
// // printf ("%d %f (%f %f %f)\n", i,
// // wtest.sumExpDeltaU.buff[i],
// // wtest.coordTestParticle[i].x,
// // wtest.coordTestParticle[i].y,
// // wtest.coordTestParticle[i].z
// // );
// // }
// if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
// }
// Zero the three force-component arrays for every atom.
// One thread per atom on a 1D-block / (possibly 2D) grid layout;
// threads past numAtom do nothing.
__global__ void clearForce (const IndexType numAtom,
			    ScalorType * forcx,
			    ScalorType * forcy,
			    ScalorType * forcz)
{
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType atomIdx = threadIdx.x + blockId * blockDim.x;
  if (atomIdx >= numAtom) return;
  forcx[atomIdx] = 0.0f;
  forcy[atomIdx] = 0.0f;
  forcz[atomIdx] = 0.0f;
}
// __global__ void
// calNonBondedInteraction (const CoordType * coord,
// const TypeType * type,
// DeviceCellListData clist,
// DeviceCellListProperty clistPro,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// bool sharednbForceTable)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// ScalorType fsumx(0.f), fsumy(0.f), fsumz(0.f);
// extern __shared__ volatile char pub_sbuff[];
// volatile IndexType * targetIndex = (volatile IndexType *) pub_sbuff;
// CoordType * targetCoord = (CoordType *) &targetIndex[roundUp4(blockDim.x)];
// volatile TypeType * targetType = (volatile TypeType *) &targetCoord[roundUp4(blockDim.x)];
// __syncthreads();
// IndexType ii = get (clist, bid, tid);
// CoordType ref;
// TypeType refType;
// if (ii != MaxIndexValue){
// ref = tex1Dfetch (global_texRef_interaction_coord, ii);
// refType = tex1Dfetch(global_texRef_interaction_type, ii);
// }
// for (unsigned i = 0; i < numNeighborCell(clistPro, bid); ++i){
// __syncthreads();
// IndexType targetCellIndex = getTargetCellIndex (clistPro, bid, i);
// CoordType shift = getShiftValue (clistPro, bid, i);
// IndexType targetIndex[tid] = get (clist, targetCellIndex, tid);
// if (targetIndex[tid] != MaxIndexValue){
// targetCoord[tid] = tex1Dfetch (global_texRef_interaction_coord, targetIndexes[tid]);
// targetType[tid] = tex1Dfetch (global_texRef_interaction_type, targetIndexes[tid]);
// }
// __syncthreads ();
// if (ii != MaxIndexValue){
// for (IndexType jj = 0; jj < blockDim.x; ++jj){
// if (targetIndex[jj] == MaxIndexValue) continue;
// ScalorType diffx = targetCoord[jj].x + shift.x - ref.x;
// ScalorType diffy = targetCoord[jj].y + shift.y - ref.y;
// ScalorType diffz = targetCoord[jj].z + shift.z - ref.z;
// if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 &&
// targetIndex[jj] != ii){
// ForceIndexType fidx;
// if (sharednbForceTable){
// fidx = nonBondedInteractionTableItem (
// nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]);
// }
// else {
// fidx = nonBondedInteractionTableItem (
// nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]);
// }
// ScalorType fx, fy, fz;
// nbforce (nonBondedInteractionType[fidx],
// &nonBondedInteractionParameter
// [nonBondedInteractionParameterPosition[fidx]],
// diffx, diffy, diffz,
// &fx, &fy, &fz);
// fsumx += fx;
// fsumy += fy;
// fsumz += fz;
// }
// }
// }
// }
// if (ii != MaxIndexValue){
// forcx[ii] += fsumx;
// forcy[ii] += fsumy;
// forcz[ii] += fsumz;
// }
// }
// Non-bonded force evaluation over a Verlet neighbor list with a
// per-atom cut-off radius.
//
// One thread per atom.  The neighbor list is stored strided: entry jj of
// atom ii lives at nlist.data[jj * nlist.stride + ii].  Coordinates are
// fetched through the interaction texture reference; accumulated forces
// are added into forcx/forcy/forcz.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
				  const CoordType * coord,
				  ScalorType * forcx,
				  ScalorType * forcy,
				  ScalorType * forcz,
				  const TypeType * type,
				  const ScalorType * rcut,
				  const RectangularBox box,
				  const DeviceNeighborList nlist)
{
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType atomIdx = threadIdx.x + blockId * blockDim.x;
  if (atomIdx >= numAtom) return;

  CoordType myCoord (tex1Dfetch(global_texRef_interaction_coord, atomIdx));
  // square the per-atom cut-off once, compare squared distances below
  ScalorType myRcut2 = rcut[atomIdx];
  myRcut2 = myRcut2 * myRcut2;

  ScalorType accx = 0.0f, accy = 0.0f, accz = 0.0f;
  ScalorType fx(0.f), fy(0.f), fz(0.f);
  const IndexType myNumNeighbor = nlist.Nneighbor[atomIdx];
  IndexType listPosi = atomIdx;
  for (IndexType kk = 0; kk < myNumNeighbor; ++kk, listPosi += nlist.stride){
    const IndexType nbIdx  ( nlist.data [listPosi] );
    const IndexType fIndex ( nlist.forceIndex [listPosi] );
    CoordType nbCoord ( tex1Dfetch(global_texRef_interaction_coord, nbIdx) );
    ScalorType dx ( nbCoord.x - myCoord.x );
    ScalorType dy ( nbCoord.y - myCoord.y );
    ScalorType dz ( nbCoord.z - myCoord.z );
    // minimum-image convention in the periodic box
    shortestImage (box, &dx, &dy, &dz);
    if (dx*dx + dy*dy + dz*dz < myRcut2){
      nbForce (nonBondedInteractionType[fIndex],
	       &nonBondedInteractionParameter
	       [nonBondedInteractionParameterPosition[fIndex]],
	       dx, dy, dz,
	       &fx, &fy, &fz);
      accx += fx;
      accy += fy;
      accz += fz;
    }
  }
  forcx[atomIdx] += accx;
  forcy[atomIdx] += accy;
  forcz[atomIdx] += accz;
}
// Non-bonded force evaluation over a neighbor list with an exclusion
// list and WITHOUT a distance cut-off check: every non-excluded neighbor
// in the list contributes (presumably the list itself is already built
// within the interaction range -- TODO confirm against the list builder).
//
// One thread per atom.  When sharedExclusionList is true, the per-atom
// exclusion entries are first staged into dynamic shared memory
// (excl_sbuff), laid out as ptr_excllist[slot * blockDim.x + tid]; the
// caller must size the launch's shared memory accordingly.  Excluded
// pairs jump to the skipInter label at the bottom of the loop body.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
				  const CoordType * coord,
				  ScalorType * forcx,
				  ScalorType * forcy,
				  ScalorType * forcz,
				  const TypeType * type,
				  const RectangularBox box,
				  const DeviceNeighborList nlist,
				  const DeviceExclusionList dexcllist,
				  const bool sharedExclusionList)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  ScalorType fsumx = 0.0f;
  ScalorType fsumy = 0.0f;
  ScalorType fsumz = 0.0f;
  IndexType ii = tid + bid * blockDim.x;
  IndexType * ptr_excllist;
  IndexType myNumExclusion (0);
  extern __shared__ char excl_sbuff[];
  // optionally stage this atom's exclusion indices into shared memory
  if (dexcllist.maxNumExclusion != 0 && ii < numAtom){
    myNumExclusion = dexcllist.numExclusion[ii];
    if (sharedExclusionList){
      ptr_excllist = (IndexType *) excl_sbuff;
      for (IndexType jj = 0; jj < myNumExclusion; ++jj){
	ptr_excllist[jj*blockDim.x+tid] =
	    dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii];
      }
    }
  }
  if (ii < numAtom) {
    CoordType ref = tex1Dfetch(global_texRef_interaction_coord, ii);
    ScalorType fx(0.f), fy(0.f), fz(0.f);
    // walk the strided neighbor list of atom ii
    for (IndexType jj = 0, nlistPosi = ii;
	 jj < nlist.Nneighbor[ii];
	 ++jj, nlistPosi += nlist.stride){
      IndexType targetIdx ( nlist.data [nlistPosi] );
      IndexType nbForceIndex;
      CoordType target;
      ScalorType diffx, diffy, diffz;
      // skip pairs present in the exclusion list (shared or global copy)
      if (sharedExclusionList){
	for (IndexType kk = 0; kk < myNumExclusion; ++kk){
	  if (ptr_excllist[kk*blockDim.x+tid] == targetIdx) {
	    goto skipInter;
	  }
	}
      }
      else {
	for (IndexType kk = 0; kk < myNumExclusion; ++kk){
	  if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) {
	    goto skipInter;
	  }
	}
      }
      nbForceIndex = ( nlist.forceIndex [nlistPosi] );
      target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) );
      diffx = ( target.x - ref.x );
      diffy = ( target.y - ref.y );
      diffz = ( target.z - ref.z );
      // minimum-image convention in the periodic box
      shortestImage (box, &diffx, &diffy, &diffz);
      nbForce (nonBondedInteractionType[nbForceIndex],
	       &nonBondedInteractionParameter
	       [nonBondedInteractionParameterPosition[nbForceIndex]],
	       diffx, diffy, diffz,
	       &fx, &fy, &fz);
      fsumx += fx;
      fsumy += fy;
      fsumz += fz;
      skipInter:
      {
      }
    }
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
}
// Non-bonded force evaluation over a neighbor list with a per-atom
// cut-off, additionally recording per-atom statistics.
//
// One thread per atom.  Besides accumulating forces, each thread records
// its atom's non-bonded potential energy and the diagonal virial
// components.  Every pair is visited from both of its atoms, so the
// per-atom contributions are halved (0.5f) before being written.  The
// statistic buffers are overwritten one entry per atom (not accumulated);
// the caller reduces them afterwards.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
				  const CoordType * coord,
				  ScalorType * forcx,
				  ScalorType * forcy,
				  ScalorType * forcz,
				  const TypeType * type,
				  const ScalorType * rcut,
				  const RectangularBox box,
				  const DeviceNeighborList nlist,
				  ScalorType * statistic_nb_buff0,
				  ScalorType * statistic_nb_buff1,
				  ScalorType * statistic_nb_buff2,
				  ScalorType * statistic_nb_buff3)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  ScalorType fsumx = 0.0f;
  ScalorType fsumy = 0.0f;
  ScalorType fsumz = 0.0f;
  IndexType ii = tid + bid * blockDim.x;
  ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
  if (ii < numAtom) {
    CoordType ref;
    ref = tex1Dfetch(global_texRef_interaction_coord, ii);
    // square the per-atom cut-off once, compare squared distances below
    ScalorType refrcut2 = rcut[ii];
    refrcut2 = refrcut2 * refrcut2;
    ScalorType fx(0.f), fy(0.f), fz(0.f);
    ScalorType dp;
    // walk the strided neighbor list of atom ii
    for (IndexType jj = 0, nlistPosi = ii;
	 jj < nlist.Nneighbor[ii];
	 ++jj, nlistPosi += nlist.stride){
      IndexType targetIdx ( nlist.data[nlistPosi] );
      IndexType nbForceIndex ( nlist.forceIndex [nlistPosi] );
      CoordType target ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) );
      ScalorType diffx ( target.x - ref.x );
      ScalorType diffy ( target.y - ref.y );
      ScalorType diffz ( target.z - ref.z );
      // minimum-image convention in the periodic box
      shortestImage (box, &diffx, &diffy, &diffz);
      if (diffx*diffx + diffy*diffy + diffz*diffz < refrcut2){
	nbForcePoten (nonBondedInteractionType[nbForceIndex],
		      &nonBondedInteractionParameter
		      [nonBondedInteractionParameterPosition[nbForceIndex]],
		      diffx, diffy, diffz,
		      &fx, &fy, &fz, &dp);
	myPoten += dp;
	myVxx += fx * diffx;
	myVyy += fy * diffy;
	myVzz += fz * diffz;
	fsumx += fx;
	fsumy += fy;
	fsumz += fz;
      }
    }
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
  // 0.5f: each pair contributes from both member atoms
  if (ii < numAtom){
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// Non-bonded force evaluation over a neighbor list with an exclusion
// list, additionally recording per-atom statistics.  As in the
// non-statistic exclusion variant, no distance cut-off check is applied:
// every non-excluded neighbor contributes (presumably the list is built
// within range -- TODO confirm against the list builder).
//
// One thread per atom.  When sharedExclusionList is true the per-atom
// exclusion entries are staged into dynamic shared memory (excl_sbuff),
// laid out as ptr_excllist[slot * blockDim.x + tid]; the caller must
// size the launch's shared memory accordingly.  Excluded pairs jump to
// the skipInter label at the bottom of the loop body.  Per-atom
// energy/virial contributions are halved (0.5f, pair visited from both
// atoms) and written -- not accumulated -- into the statistic buffers.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
				  const CoordType * coord,
				  ScalorType * forcx,
				  ScalorType * forcy,
				  ScalorType * forcz,
				  const TypeType * type,
				  const RectangularBox box,
				  const DeviceNeighborList nlist,
				  const DeviceExclusionList dexcllist,
				  const bool sharedExclusionList,
				  ScalorType * statistic_nb_buff0,
				  ScalorType * statistic_nb_buff1,
				  ScalorType * statistic_nb_buff2,
				  ScalorType * statistic_nb_buff3)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  ScalorType fsumx = 0.0f;
  ScalorType fsumy = 0.0f;
  ScalorType fsumz = 0.0f;
  IndexType ii = tid + bid * blockDim.x;
  ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
  IndexType * ptr_excllist;
  IndexType myNumExclusion (0);
  extern __shared__ char excl_sbuff[];
  // optionally stage this atom's exclusion indices into shared memory
  if (dexcllist.maxNumExclusion != 0 && ii < numAtom){
    myNumExclusion = dexcllist.numExclusion[ii];
    if (sharedExclusionList){
      ptr_excllist = (IndexType *) excl_sbuff;
      for (IndexType jj = 0; jj < myNumExclusion; ++jj){
	ptr_excllist[jj*blockDim.x+tid] =
	    dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii];
      }
    }
  }
  if (ii < numAtom) {
    CoordType ref;
    ref = tex1Dfetch(global_texRef_interaction_coord, ii);
    ScalorType fx(0.f), fy(0.f), fz(0.f);
    ScalorType dp;
    // walk the strided neighbor list of atom ii
    for (IndexType jj = 0, nlistPosi = ii;
	 jj < nlist.Nneighbor[ii];
	 ++jj, nlistPosi += nlist.stride){
      IndexType targetIdx ( nlist.data[nlistPosi] );
      IndexType nbForceIndex;
      CoordType target;
      ScalorType diffx, diffy, diffz;
      // skip pairs present in the exclusion list (shared or global copy)
      if (sharedExclusionList){
	for (IndexType kk = 0; kk < myNumExclusion; ++kk){
	  if (ptr_excllist[kk*blockDim.x+tid] == targetIdx) {
	    goto skipInter;
	  }
	}
      }
      else {
	for (IndexType kk = 0; kk < myNumExclusion; ++kk){
	  if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) {
	    goto skipInter;
	  }
	}
      }
      nbForceIndex = ( nlist.forceIndex [nlistPosi] );
      target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) );
      diffx = ( target.x - ref.x );
      diffy = ( target.y - ref.y );
      diffz = ( target.z - ref.z );
      // minimum-image convention in the periodic box
      shortestImage (box, &diffx, &diffy, &diffz);
      nbForcePoten (nonBondedInteractionType[nbForceIndex],
		    &nonBondedInteractionParameter
		    [nonBondedInteractionParameterPosition[nbForceIndex]],
		    diffx, diffy, diffz,
		    &fx, &fy, &fz, &dp);
      myPoten += dp;
      myVxx += fx * diffx;
      myVyy += fy * diffy;
      myVzz += fz * diffz;
      fsumx += fx;
      fsumy += fy;
      fsumz += fz;
      skipInter:
      {
      }
    }
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
  // 0.5f: each pair contributes from both member atoms
  if (ii < numAtom){
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// Bonded (two-body) force evaluation.
//
// One thread per atom walks that atom's strided bond list (entry jj of
// atom ii at [jj * bdlist.stride + ii]) and accumulates the resulting
// forces into forcx/forcy/forcz.
__global__ void calBondInteraction (const IndexType numAtom,
				    const CoordType * coord,
				    ScalorType * forcx,
				    ScalorType * forcy,
				    ScalorType * forcz,
				    const RectangularBox box,
				    const DeviceBondList bdlist)
{
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType atomIdx = threadIdx.x + blockId * blockDim.x;
  if (atomIdx >= numAtom) return;

  CoordType myCoord;
#ifdef COMPILE_NO_TEX
  myCoord = coord[atomIdx];
#else
  myCoord = tex1Dfetch(global_texRef_interaction_coord, atomIdx);
#endif
  ScalorType accx = 0.0f, accy = 0.0f, accz = 0.0f;
  const IndexType myNumBond = bdlist.numBond[atomIdx];
  // iterate over the bonds actually present, never past the list capacity
  for (IndexType bb = 0; bb < bdlist.maxNumBond && bb < myNumBond; ++bb){
    const IndexType partnerIdx = bdlist.bondNeighborIndex[bb * bdlist.stride + atomIdx];
    CoordType partner;
#ifdef COMPILE_NO_TEX
    partner = coord[partnerIdx];
#else
    partner = tex1Dfetch(global_texRef_interaction_coord, partnerIdx);
#endif
    ScalorType dx = partner.x - myCoord.x;
    ScalorType dy = partner.y - myCoord.y;
    ScalorType dz = partner.z - myCoord.z;
    // minimum-image convention in the periodic box
    shortestImage (box, &dx, &dy, &dz);
    const IndexType bondFindex = bdlist.bondIndex[bb * bdlist.stride + atomIdx];
    ScalorType fx, fy, fz;
    bondForce (bondedInteractionType[bondFindex],
	       &bondedInteractionParameter
	       [bondedInteractionParameterPosition[bondFindex]],
	       dx, dy, dz, &fx, &fy, &fz);
    accx += fx;
    accy += fy;
    accz += fz;
  }
  forcx[atomIdx] += accx;
  forcy[atomIdx] += accy;
  forcz[atomIdx] += accz;
}
// Bonded force evaluation with per-block statistic accumulation.
//
// Same force computation as the non-statistic variant, plus the bonded
// potential energy and diagonal virial components.  Per-thread partials
// are reduced across the block in dynamic shared memory (buff, one
// ScalorType per thread; the caller passes calBondInteraction_sbuffSize
// as the launch's shared-memory size) and thread 0 writes one partial
// per block into statistic_b_buff0..3[bid].
// The 0.5f factor halves each bond's energy/virial because every bond is
// visited from both of its atoms.
// All threads -- including those with ii >= numAtom -- must reach the
// reductions (which contain __syncthreads), hence no early return.
// NOTE(review): ptr_de is unused in this kernel.
__global__ void calBondInteraction (const IndexType numAtom,
				    const CoordType * coord,
				    ScalorType * forcx,
				    ScalorType * forcy,
				    ScalorType * forcz,
				    const RectangularBox box,
				    const DeviceBondList bdlist,
				    ScalorType * statistic_b_buff0,
				    ScalorType * statistic_b_buff1,
				    ScalorType * statistic_b_buff2,
				    ScalorType * statistic_b_buff3,
				    mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  extern __shared__ volatile ScalorType buff[];
  buff[tid] = 0.f;
  __syncthreads();
  ScalorType fsumx = 0.0f;
  ScalorType fsumy = 0.0f;
  ScalorType fsumz = 0.0f;
  IndexType ii = tid + bid * blockDim.x;
  ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
  if (ii < numAtom) {
    CoordType ref;
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
#else
    ref = tex1Dfetch(global_texRef_interaction_coord, ii);
#endif
    IndexType myNumBond = bdlist.numBond[ii];
    // walk the strided bond list of atom ii
    for (IndexType jj = 0; jj < bdlist.maxNumBond; ++jj){
      if (jj == myNumBond) break;
      IndexType targetIdx = bdlist.bondNeighborIndex[jj * bdlist.stride + ii];
      CoordType target;
#ifdef COMPILE_NO_TEX
      target = coord[targetIdx];
#else
      target = tex1Dfetch(global_texRef_interaction_coord, targetIdx);
#endif
      ScalorType diffx, diffy, diffz;
      diffx = target.x - ref.x;
      diffy = target.y - ref.y;
      diffz = target.z - ref.z;
      // minimum-image convention in the periodic box
      shortestImage (box, &diffx, &diffy, &diffz);
      ScalorType fx, fy, fz;
      IndexType bondFindex = bdlist.bondIndex[jj * bdlist.stride + ii];
      ScalorType dp;
      bondForcePoten (bondedInteractionType[bondFindex],
		      &bondedInteractionParameter
		      [bondedInteractionParameterPosition[bondFindex]],
		      diffx, diffy, diffz, &fx, &fy, &fz, &dp);
      myPoten += dp;
      myVxx += fx * diffx;
      myVyy += fy * diffy;
      myVzz += fz * diffz;
      fsumx += fx;
      fsumy += fy;
      fsumz += fz;
    }
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
  // block-wide reductions: one partial sum per statistic, per block
  buff[tid] = myPoten * 0.5f;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0];
  __syncthreads();
  buff[tid] = myVxx * 0.5f;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0];
  __syncthreads();
  buff[tid] = myVyy * 0.5f;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0];
  __syncthreads();
  buff[tid] = myVzz * 0.5f;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0];
  __syncthreads();
}
// Angle (three-body bonded) force evaluation.
//
// One thread per atom: each thread walks the angle list of its atom and
// accumulates only the force acting on that atom (the other member atoms
// of the same angle compute their own share in their own threads).
// Angle data is stored strided: entry jj of atom ii lives at
// [jj * anglelist.stride + ii]; each angle stores two neighbor indices.
//
// Two difference vectors are built depending on whether this atom is the
// central one (anglePosi == 1):
//   central atom:  d0 = ref - t0,   d1 = t1 - ref
//   end atom:      d0 = t0 - ref,   d1 = t1 - t0
// angleForce() returns the two partial forces; the central atom receives
// f0 + f1 while an end atom receives -f0.
//
// Fix vs. previous revision: removed the dead store `myNumAngle = 0;`
// immediately before the out-of-range `return`, and the redundant
// re-check of `ii < numAtom` that the early return already guarantees.
__global__ void calAngleInteraction (const IndexType numAtom,
				     const CoordType * coord,
				     ScalorType * forcx,
				     ScalorType * forcy,
				     ScalorType * forcz,
				     const RectangularBox box,
				     const DeviceAngleList anglelist)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  // out-of-range threads have no work; no block-wide sync follows, so an
  // early return is safe here
  if (ii >= numAtom) return;

  ScalorType fsumx = 0.0f;
  ScalorType fsumy = 0.0f;
  ScalorType fsumz = 0.0f;
  IndexType myNumAngle = anglelist.numAngle[ii];
  CoordType ref;
#ifdef COMPILE_NO_TEX
  ref = coord[ii];
#else
  ref = tex1Dfetch(global_texRef_interaction_coord, ii);
#endif
  for (IndexType jj = 0; jj < myNumAngle; ++jj){
    IndexType targetIdx0 =
	anglelist.angleNeighborIndex[((jj<<1)  ) * anglelist.stride + ii];
    IndexType targetIdx1 =
	anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii];
    IndexType myPosi =
	anglelist.anglePosi[jj * anglelist.stride + ii];
    CoordType target0, target1;
#ifdef COMPILE_NO_TEX
    target0 = coord[targetIdx0];
    target1 = coord[targetIdx1];
#else
    target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0);
    target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1);
#endif
    ScalorType diff0x, diff0y, diff0z;
    ScalorType diff1x, diff1y, diff1z;
    bool center (myPosi == 1);
    if (center){
      diff0x = ref.x - target0.x;
      diff0y = ref.y - target0.y;
      diff0z = ref.z - target0.z;
      diff1x = target1.x - ref.x;
      diff1y = target1.y - ref.y;
      diff1z = target1.z - ref.z;
    } else {
      diff0x = target0.x - ref.x;
      diff0y = target0.y - ref.y;
      diff0z = target0.z - ref.z;
      diff1x = target1.x - target0.x;
      diff1y = target1.y - target0.y;
      diff1z = target1.z - target0.z;
    }
    // minimum-image convention for both legs of the angle
    shortestImage (box, &diff0x, &diff0y, &diff0z);
    shortestImage (box, &diff1x, &diff1y, &diff1z);
    ScalorType f0x, f0y, f0z;
    ScalorType f1x, f1y, f1z;
    IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii];
    angleForce (center,
		bondedInteractionType[angleFindex],
		&bondedInteractionParameter
		[bondedInteractionParameterPosition[angleFindex]],
		diff0x, diff0y, diff0z,
		diff1x, diff1y, diff1z,
		&f0x, &f0y, &f0z,
		&f1x, &f1y, &f1z);
    if (center){
      fsumx += f0x + f1x;
      fsumy += f0y + f1y;
      fsumz += f0z + f1z;
    }
    else {
      fsumx -= f0x;
      fsumy -= f0y;
      fsumz -= f0z;
    }
  }
  forcx[ii] += fsumx;
  forcy[ii] += fsumy;
  forcz[ii] += fsumz;
}
// Angle force evaluation with per-block statistic accumulation.
//
// Same force computation as the non-statistic variant, plus the angle
// potential energy and diagonal virial components.  The potential is
// scaled by 1/3 because every angle is visited by its three member
// atoms; the virial is accumulated only in the central-atom branch --
// presumably so each angle's virial is counted exactly once (TODO
// confirm the sign convention `myVxx -= f0x*diff0x - f1x*diff1x`).
// Per-thread partials are reduced across the block in dynamic shared
// memory (buff, one ScalorType per thread; the caller passes
// calAngleInteraction_sbuffSize as the launch's shared-memory size) and
// thread 0 writes one partial per block into statistic_b_buff0..3[bid].
// All threads -- including those with ii >= numAtom -- must reach the
// reductions (which contain __syncthreads), hence no early return.
// NOTE(review): ptr_de is unused in this kernel.
__global__ void calAngleInteraction (const IndexType numAtom,
				     const CoordType * coord,
				     ScalorType * forcx,
				     ScalorType * forcy,
				     ScalorType * forcz,
				     const RectangularBox box,
				     const DeviceAngleList anglelist,
				     ScalorType * statistic_b_buff0,
				     ScalorType * statistic_b_buff1,
				     ScalorType * statistic_b_buff2,
				     ScalorType * statistic_b_buff3,
				     mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  ScalorType fsumx = 0.0f;
  ScalorType fsumy = 0.0f;
  ScalorType fsumz = 0.0f;
  ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
  IndexType ii = tid + bid * blockDim.x;
  IndexType myNumAngle;
  extern __shared__ volatile ScalorType buff[];
  buff[tid] = 0.f;
  __syncthreads();
  if (ii < numAtom) {
    CoordType ref;
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
#else
    ref = tex1Dfetch(global_texRef_interaction_coord, ii);
#endif
    myNumAngle = anglelist.numAngle[ii];
    // walk the strided angle list of atom ii
    for (IndexType jj = 0; jj < myNumAngle; ++jj){
      IndexType targetIdx0 =
	  anglelist.angleNeighborIndex[((jj<<1)  ) * anglelist.stride + ii];
      IndexType targetIdx1 =
	  anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii];
      IndexType myPosi =
	  anglelist.anglePosi[jj * anglelist.stride + ii];
      CoordType target0, target1;
#ifdef COMPILE_NO_TEX
      target0 = coord[targetIdx0];
      target1 = coord[targetIdx1];
#else
      target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0);
      target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1);
#endif
      ScalorType diff0x, diff0y, diff0z;
      ScalorType diff1x, diff1y, diff1z;
      // difference vectors depend on this atom's position in the angle
      bool center = (myPosi == 1);
      if (center){
	diff0x = ref.x - target0.x;
	diff0y = ref.y - target0.y;
	diff0z = ref.z - target0.z;
	diff1x = target1.x - ref.x;
	diff1y = target1.y - ref.y;
	diff1z = target1.z - ref.z;
      } else {
	diff0x = target0.x - ref.x;
	diff0y = target0.y - ref.y;
	diff0z = target0.z - ref.z;
	diff1x = target1.x - target0.x;
	diff1y = target1.y - target0.y;
	diff1z = target1.z - target0.z;
      }
      // minimum-image convention for both legs of the angle
      shortestImage (box, &diff0x, &diff0y, &diff0z);
      shortestImage (box, &diff1x, &diff1y, &diff1z);
      ScalorType f0x, f0y, f0z;
      ScalorType f1x, f1y, f1z;
      IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii];
      ScalorType dp;
      angleForcePoten (center,
		       bondedInteractionType[angleFindex],
		       &bondedInteractionParameter
		       [bondedInteractionParameterPosition[angleFindex]],
		       diff0x, diff0y, diff0z,
		       diff1x, diff1y, diff1z,
		       &f0x, &f0y, &f0z,
		       &f1x, &f1y, &f1z,
		       &dp);
      myPoten += dp;
      if (center){
	fsumx += f0x + f1x;
	fsumy += f0y + f1y;
	fsumz += f0z + f1z;
	myVxx -= f0x * diff0x - f1x * diff1x;
	myVyy -= f0y * diff0y - f1y * diff1y;
	myVzz -= f0z * diff0z - f1z * diff1z;
      }
      else {
	fsumx -= f0x;
	fsumy -= f0y;
	fsumz -= f0z;
      }
    }
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
  // block-wide reductions: one partial sum per statistic, per block;
  // 1/3 factor: each angle is visited by its three member atoms
  buff[tid] = myPoten * 0.33333333333333333f;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0];
  __syncthreads();
  buff[tid] = myVxx;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0];
  __syncthreads();
  buff[tid] = myVyy;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0];
  __syncthreads();
  buff[tid] = myVzz;
  sumVectorBlockBuffer_2 (buff);
  if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0];
  __syncthreads();
}
// static __device__ IndexType shiftedD3toD1 (
// DeviceCellList clist,
// RectangularBoxGeometry::RectangularBox box,
// int ix, int iy, int iz,
// ScalorType * shiftx , ScalorType * shifty, ScalorType * shiftz)
// {
// int tmp;
// ix += (tmp = -int(floorf(ix * clist.NCelli.x))) * clist.NCell.x;
// *shiftx = tmp * box.size.x;
// iy += (tmp = -int(floorf(iy * clist.NCelli.y))) * clist.NCell.y;
// *shifty = tmp * box.size.y;
// iz += (tmp = -int(floorf(iz * clist.NCelli.z))) * clist.NCell.z;
// *shiftz = tmp * box.size.z;
// return D3toD1 (clist.NCell, ix, iy, iz);
// }
// __global__ void calNonBondedInteraction (
// const IndexType numAtom,
// const CoordType * coord,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// const TypeType * type,
// const RectangularBox box,
// DeviceCellList clist,
// mdError_t * ptr_de)
// {
// // RectangularBoxGeometry::normalizeSystem (box, &ddata);
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType bidx, bidy, bidz;
// D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
// // load index
// IndexType ii = getDeviceCellListData (clist, bid, tid);
// // load iith coordinate // use texturefetch instead
// CoordType ref;
// TypeType reftype;
// ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
// if (ii != MaxIndexValue){
// #ifdef COMPILE_NO_TEX
// ref = coord[ii];
// reftype = type[ii];
// #else
// ref = tex1Dfetch (global_texRef_interaction_coord, ii);
// // reftype = tex1Dfetch(global_texRef_interaction_type, ii);
// #endif
// }
// ScalorType rlist = clist.rlist;
// // the target index and coordinates are shared
// extern __shared__ volatile char pub_sbuff[];
// volatile IndexType * targetIndexes =
// (volatile IndexType *) pub_sbuff;
// CoordType * target =
// (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
// volatile TypeType * targettype =
// (volatile TypeType *) &target[roundUp4(blockDim.x)];
// __syncthreads();
// // bool oneCellX(false), oneCellY(false), oneCellZ(false);
// // if (clist.NCell.x == 1) oneCellX = true;
// // if (clist.NCell.y == 1) oneCellY = true;
// // if (clist.NCell.z == 1) oneCellZ = true;
// // int upperx(1), lowerx(-1);
// // int uppery(1), lowery(-1);
// // int upperz(1), lowerz(-1);
// // if (oneCellX) {lowerx = 0; upperx = 0;}
// // if (oneCellY) {lowery = 0; uppery = 0;}
// // if (oneCellZ) {lowerz = 0; upperz = 0;}
// ScalorType rlist2 = rlist * rlist;
// // loop over 27 neighbor cells
// #pragma unroll 3
// // for (int nci = bidx + lowerx; nci <= bidx + upperx; ++nci){
// // for (int ncj = bidy + lowery; ncj <= bidy + uppery; ++ncj){
// // for (int nck = bidz + lowerz; nck <= bidz + upperz; ++nck){
// for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){
// for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){
// for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){
// // for (int di = lowerx; di <= upperx; ++di){
// // for (int dj = lowery; dj <= uppery; ++dj){
// // for (int dk = lowerz; dk <= upperz; ++dk){
// __syncthreads();
// // the shift value of a cell is pre-computed
// ScalorType xshift, yshift, zshift;
// // int nci = di + bidx;
// // int ncj = dj + bidy;
// // int nck = dk + bidz;
// IndexType targetCellIdx = shiftedD3toD1 (clist, box,
// nci, ncj, nck,
// &xshift, &yshift, &zshift);
// // load target index and coordinates form global memary
// // IndexType tmp = (targetIndexes[tid] =
// // getDeviceCellListData(clist, targetCellIdx, tid));
// targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
// if (targetIndexes[tid] != MaxIndexValue){
// // #ifdef COMPILE_NO_TEX
// // target[tid] = coord[tmp];
// // // targettype[tid] = type[tmp];
// // #else
// target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
// // targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp);
// // #endif
// }
// __syncthreads();
// // find neighbor
// if (ii != MaxIndexValue){
// for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
// // if (targetIndexes[jj] == MaxIndexValue) break;
// ScalorType diffx = target[jj].x - xshift - ref.x;
// ScalorType diffy = target[jj].y - yshift - ref.y;
// ScalorType diffz = target[jj].z - zshift - ref.z;
// // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
// // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
// // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
// //printf ("%d\t%d\t%f\t%f\n", ii,
// ScalorType dr2;
// if ((dr2 = (diffx*diffx+diffy*diffy+diffz*diffz)) < rlist2 &&
// targetIndexes[jj] != ii){
// IndexType fidx(0);
// // fidx = AtomNBForceTable::calForceIndex (
// // nonBondedInteractionTable,
// // const_numAtomType[0],
// // reftype,
// // targettype[jj]);
// // if (fidx != mdForceNULL) {
// ScalorType fx, fy, fz;
// nbForce (nonBondedInteractionType[fidx],
// &nonBondedInteractionParameter
// [nonBondedInteractionParameterPosition[fidx]],
// diffx, diffy, diffz,
// dr2,
// &fx, &fy, &fz);
// fsumx += fx;
// fsumy += fy;
// fsumz += fz;
// // }
// }
// }
// }
// }
// }
// }
// if (ii != MaxIndexValue){
// forcx[ii] += fsumx;
// forcy[ii] += fsumy;
// forcz[ii] += fsumz;
// }
// }
// __global__ void calNonBondedInteraction (
// const IndexType numAtom,
// const CoordType * coord,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// const TypeType * type,
// const RectangularBox box,
// DeviceCellList clist,
// ScalorType * statistic_nb_buff0,
// ScalorType * statistic_nb_buff1,
// ScalorType * statistic_nb_buff2,
// ScalorType * statistic_nb_buff3,
// mdError_t * ptr_de)
// {
// // RectangularBoxGeometry::normalizeSystem (box, &ddata);
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType bidx, bidy, bidz;
// D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
// // load index
// IndexType ii = getDeviceCellListData (clist, bid, tid);
// // load iith coordinate // use texturefetch instead
// CoordType ref;
// TypeType reftype;
// ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
// ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
// if (ii != MaxIndexValue){
// #ifdef COMPILE_NO_TEX
// ref = coord[ii];
// reftype = type[ii];
// #else
// ref = tex1Dfetch (global_texRef_interaction_coord, ii);
// reftype = tex1Dfetch(global_texRef_interaction_type, ii);
// #endif
// }
// ScalorType rlist = clist.rlist;
// // the target index and coordinates are shared
// extern __shared__ volatile char pub_sbuff[];
// volatile IndexType * targetIndexes =
// (volatile IndexType *) pub_sbuff;
// CoordType * target =
// (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
// volatile TypeType * targettype =
// (volatile TypeType *) &target[roundUp4(blockDim.x)];
// __syncthreads();
// // bool oneCellX(false), oneCellY(false), oneCellZ(false);
// // if (clist.NCell.x == 1) oneCellX = true;
// // if (clist.NCell.y == 1) oneCellY = true;
// // if (clist.NCell.z == 1) oneCellZ = true;
// // int upperx(1), lowerx(-1);
// // int uppery(1), lowery(-1);
// // int upperz(1), lowerz(-1);
// // if (oneCellX) {lowerx = 0; upperx = 0;}
// // if (oneCellY) {lowery = 0; uppery = 0;}
// // if (oneCellZ) {lowerz = 0; upperz = 0;}
// ScalorType rlist2 = rlist * rlist;
// // loop over 27 neighbor cells
// #pragma unroll 3
// for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){
// for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){
// for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){
// // for (int di = lowerx; di <= upperx; ++di){
// // for (int dj = lowery; dj <= uppery; ++dj){
// // for (int dk = lowerz; dk <= upperz; ++dk){
// __syncthreads();
// // the shift value of a cell is pre-computed
// ScalorType xshift, yshift, zshift;
// // int nci = di + bidx;
// // int ncj = dj + bidy;
// // int nck = dk + bidz;
// IndexType targetCellIdx = shiftedD3toD1 (clist, box,
// nci, ncj, nck,
// &xshift, &yshift, &zshift);
// // load target index and coordinates form global memary
// IndexType tmp = (targetIndexes[tid] =
// getDeviceCellListData(clist, targetCellIdx, tid));
// if (tmp != MaxIndexValue){
// #ifdef COMPILE_NO_TEX
// target[tid] = coord[tmp];
// targettype[tid] = type[tmp];
// #else
// target[tid] = tex1Dfetch(global_texRef_interaction_coord, tmp);
// targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp);
// #endif
// }
// __syncthreads();
// // find neighbor
// if (ii != MaxIndexValue){
// for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
// ScalorType diffx = target[jj].x - xshift - ref.x;
// ScalorType diffy = target[jj].y - yshift - ref.y;
// ScalorType diffz = target[jj].z - zshift - ref.z;
// // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
// // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
// // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
// //printf ("%d\t%d\t%f\t%f\n", ii,
// if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 &&
// targetIndexes[jj] != ii){
// IndexType fidx(0);
// // fidx = AtomNBForceTable::calForceIndex (
// // nonBondedInteractionTable,
// // const_numAtomType[0],
// // reftype,
// // targettype[jj]);
// // if (fidx != mdForceNULL) {
// ScalorType fx, fy, fz, dp;
// nbForcePoten (nonBondedInteractionType[fidx],
// &nonBondedInteractionParameter
// [nonBondedInteractionParameterPosition[fidx]],
// diffx, diffy, diffz,
// &fx, &fy, &fz, &dp);
// myPoten += dp;
// myVxx += fx * diffx;
// myVyy += fy * diffy;
// myVzz += fz * diffz;
// fsumx += fx;
// fsumy += fy;
// fsumz += fz;
// // }
// }
// }
// }
// }
// }
// }
// if (ii != MaxIndexValue){
// forcx[ii] += fsumx;
// forcy[ii] += fsumy;
// forcz[ii] += fsumz;
// statistic_nb_buff0[ii] = myPoten * 0.5f;
// statistic_nb_buff1[ii] = myVxx * 0.5f;
// statistic_nb_buff2[ii] = myVyy * 0.5f;
// statistic_nb_buff3[ii] = myVzz * 0.5f;
// }
// }
// Non-bonded pair forces from a cell list (forces only, no statistics).
// Launch layout: one thread block per cell (bid = blockIdx.x +
// gridDim.x*blockIdx.y); tid indexes a slot inside the cell.  Dynamic
// shared memory must hold roundUp4(blockDim.x) entries each of
// IndexType, CoordType and TypeType for one target cell.
// numAtom and ptr_de are unused here; kept for interface uniformity
// with the sibling force kernels.
__global__ void
calNonBondedInteraction_cell (const IndexType numAtom,
                              const CoordType * coord,
                              ScalorType * forcx,
                              ScalorType * forcy,
                              ScalorType * forcz,
                              const TypeType * type,
                              const RectangularBox box,
                              const DeviceCellList clist,
                              const ScalorType rcut,
                              mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // index of the atom handled by this thread; MaxIndexValue marks an
  // empty slot of the cell.
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  // reference atom coordinate and type.
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared buffers: indexes, coordinates and types of one target cell.
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rcut2 = rcut * rcut;
  // Loop-invariant flags, hoisted out of the neighbor-cell loop (they
  // were recomputed every iteration): explicit minimum-image correction
  // is only needed along a direction with a single cell.
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  // loop over the pre-computed neighbor cells of this cell.
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    // cooperatively load the target cell into shared memory.
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the target cell for interacting neighbors of atom ii.
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 &&
            targetIndexes[jj] != ii){
          IndexType fidx(0);
          fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[jj]);
          ScalorType fx, fy, fz;
          nbForce (nonBondedInteractionType[fidx],
                   &nonBondedInteractionParameter
                   [nonBondedInteractionParameterPosition[fidx]],
                   diffx, diffy, diffz,
                   &fx, &fy, &fz);
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  // accumulate into the global force arrays.
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
}
// Non-bonded pair forces from a cell list, with per-atom statistics:
// potential energy and diagonal virial components are written to
// statistic_nb_buff0..3 (halved, because every pair is visited from
// both partners).  Launch layout: one thread block per cell; dynamic
// shared memory holds roundUp4(blockDim.x) entries each of IndexType,
// CoordType and TypeType for one target cell.  numAtom and ptr_de are
// unused here; kept for interface uniformity.
__global__ void
calNonBondedInteraction_cell (const IndexType numAtom,
                              const CoordType * coord,
                              ScalorType * forcx,
                              ScalorType * forcy,
                              ScalorType * forcz,
                              const TypeType * type,
                              const RectangularBox box,
                              const DeviceCellList clist,
                              const ScalorType rcut,
                              ScalorType * statistic_nb_buff0,
                              ScalorType * statistic_nb_buff1,
                              ScalorType * statistic_nb_buff2,
                              ScalorType * statistic_nb_buff3,
                              mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // index of the atom handled by this thread; MaxIndexValue marks an
  // empty slot of the cell.
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared buffers: indexes, coordinates and types of one target cell.
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rcut2 = rcut * rcut;
  // Loop-invariant flags, hoisted out of the neighbor-cell loop (they
  // were recomputed every iteration): explicit minimum-image correction
  // is only needed along a direction with a single cell.
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    // cooperatively load the target cell into shared memory.
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the target cell for interacting neighbors of atom ii.
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 &&
            targetIndexes[jj] != ii){
          IndexType fidx(0);
          fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[jj]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    // each pair is counted from both partners, hence the 0.5 factor.
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// Compute non-bonded forces (cutoff rcut) from a cell list and, in the
// same pass, build the Verlet neighbor list nlist (radius nlist.rlist).
// Sets *ptr_de = mdErrorShortNeighborList when an atom collects more
// neighbors than nlist.listLength can hold.  Launch layout: one thread
// block per cell; dynamic shared memory holds roundUp4(blockDim.x)
// entries each of IndexType, CoordType and TypeType.
__global__ void
calNonBondedInteraction (const IndexType numAtom,
                         const CoordType * coord,
                         ScalorType * forcx,
                         ScalorType * forcy,
                         ScalorType * forcz,
                         const TypeType * type,
                         const RectangularBox box,
                         DeviceCellList clist,
                         const ScalorType rcut,
                         DeviceNeighborList nlist,
                         mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // running neighbor count for atom ii.
  IndexType Nneighbor = 0;
  // index of the atom handled by this thread; MaxIndexValue marks an
  // empty slot of the cell.
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared buffers: indexes, coordinates and types of one target cell.
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rlist2 = nlist.rlist * nlist.rlist;
  ScalorType rcut2 = rcut * rcut;
  // explicit minimum-image correction is only needed along a direction
  // with a single cell.
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    // cooperatively load the target cell into shared memory.
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the target cell: accumulate forces (r < rcut) and record
    // neighbors (r < rlist).
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
        // force index is needed by both branches below.
        IndexType fidx(0);
        fidx = AtomNBForceTable::calForceIndex (
            const_nonBondedInteractionTable,
            const_numAtomType[0],
            reftype,
            targettype[jj]);
        if (dr2 < rcut2 &&
            targetIndexes[jj] != ii){
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
        if (dr2 < rlist2 &&
            targetIndexes[jj] != ii){
          // Bounds check before storing: previously an overfull list
          // wrote past the end of nlist.data/forceIndex before the
          // overflow was reported through ptr_de below.
          if (Nneighbor < nlist.listLength){
            IndexType listIdx = Nneighbor * nlist.stride + ii;
            nlist.data[listIdx] = targetIndexes[jj];
            nlist.forceIndex[listIdx] = fidx;
          }
          Nneighbor ++;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    if (Nneighbor > nlist.listLength && ptr_de != NULL){
      *ptr_de = mdErrorShortNeighborList;
      return;
    }
    nlist.Nneighbor[ii] = Nneighbor;
  }
}
// Compute non-bonded forces (cutoff rcut) from a cell list, build the
// Verlet neighbor list nlist (radius nlist.rlist), and accumulate
// per-atom statistics (potential, diagonal virial) into
// statistic_nb_buff0..3 (halved for pair double counting).  Sets
// *ptr_de = mdErrorShortNeighborList on neighbor-list overflow.
// Launch layout: one thread block per cell; dynamic shared memory
// holds roundUp4(blockDim.x) entries each of IndexType, CoordType and
// TypeType.
__global__ void
calNonBondedInteraction (const IndexType numAtom,
                         const CoordType * coord,
                         ScalorType * forcx,
                         ScalorType * forcy,
                         ScalorType * forcz,
                         const TypeType * type,
                         const RectangularBox box,
                         DeviceCellList clist,
                         const ScalorType rcut,
                         DeviceNeighborList nlist,
                         ScalorType * statistic_nb_buff0,
                         ScalorType * statistic_nb_buff1,
                         ScalorType * statistic_nb_buff2,
                         ScalorType * statistic_nb_buff3,
                         mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // running neighbor count for atom ii.
  IndexType Nneighbor = 0;
  // index of the atom handled by this thread; MaxIndexValue marks an
  // empty slot of the cell.
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared buffers: indexes, coordinates and types of one target cell.
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rlist2 = nlist.rlist * nlist.rlist;
  ScalorType rcut2 = rcut * rcut;
  // explicit minimum-image correction is only needed along a direction
  // with a single cell.
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    // cooperatively load the target cell into shared memory.
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the target cell: accumulate forces/statistics (r < rcut)
    // and record neighbors (r < rlist).
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
        // force index is needed by both branches below.
        IndexType fidx(0);
        fidx = AtomNBForceTable::calForceIndex (
            const_nonBondedInteractionTable,
            const_numAtomType[0],
            reftype,
            targettype[jj]);
        if (dr2 < rcut2 &&
            targetIndexes[jj] != ii){
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
        if (dr2 < rlist2 &&
            targetIndexes[jj] != ii){
          // Bounds check before storing: previously an overfull list
          // wrote past the end of nlist.data/forceIndex before the
          // overflow was reported through ptr_de below.
          if (Nneighbor < nlist.listLength){
            IndexType listIdx = Nneighbor * nlist.stride + ii;
            nlist.data[listIdx] = targetIndexes[jj];
            nlist.forceIndex[listIdx] = fidx;
          }
          Nneighbor ++;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    // each pair is counted from both partners, hence the 0.5 factor.
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
    if (Nneighbor > nlist.listLength && ptr_de != NULL){
      *ptr_de = mdErrorShortNeighborList;
      return;
    }
    nlist.Nneighbor[ii] = Nneighbor;
  }
}
// All-pairs non-bonded force computation (O(N^2)); used when no cell
// list is available.  Atoms are processed in tiles of blockDim.x: each
// iteration stages one tile of coordinates/types in shared memory, then
// every thread accumulates the interactions of its own atom with the
// tile.  Dynamic shared memory: roundUp4(blockDim.x) CoordType plus
// TypeType entries.  ptr_de is unused; kept for interface uniformity.
__global__ void
calNonBondedInteraction_all (const IndexType numAtom,
                             const CoordType * coord,
                             ScalorType * forcx,
                             ScalorType * forcy,
                             ScalorType * forcz,
                             const TypeType * type,
                             const RectangularBox box,
                             const ScalorType rcut,
                             mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType numberAtom = numAtom;
  // global index of the atom owned by this thread.
  IndexType ii = tid + bid * blockDim.x;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  // shared tile of target coordinates and types.
  extern __shared__ volatile char pub_sbuff[];
  volatile CoordType * target =
      (volatile CoordType *) pub_sbuff;
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  __syncthreads();
  CoordType ref;
  TypeType reftype;
  if (ii < numberAtom){
    ref = coord[ii];
    reftype = type[ii];
  }
  ScalorType rcut2 = rcut * rcut;
  for (IndexType targetBlockId = 0;
       targetBlockId * blockDim.x < numberAtom; ++targetBlockId){
    IndexType jj = tid + targetBlockId * blockDim.x;
    __syncthreads();
    // cooperatively stage the next tile in shared memory.
    if (jj < numberAtom){
      target[tid].x = coord[jj].x;
      target[tid].y = coord[jj].y;
      target[tid].z = coord[jj].z;
      targettype[tid] = type[jj];
    }
    __syncthreads();
    if (ii < numberAtom){
      for (IndexType kk = 0; kk < blockDim.x; ++kk){
        // the last tile may be only partially filled.
        if (kk + targetBlockId * blockDim.x >= numberAtom) break;
        ScalorType diffx = target[kk].x - ref.x;
        ScalorType diffy = target[kk].y - ref.y;
        ScalorType diffz = target[kk].z - ref.z;
        shortestImage (box, &diffx, &diffy, &diffz);
        ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz;
        // (the redundant second dr2 < rcut2 test has been removed.)
        if (dr2 < rcut2 &&
            kk + targetBlockId * blockDim.x != ii){
          IndexType fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[kk]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  // accumulate into the global force arrays.
  if (ii < numberAtom){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
}
// All-pairs non-bonded force computation (O(N^2)) with per-atom
// statistics: potential energy and diagonal virial components are
// written to statistic_nb_buff0..3 (halved for pair double counting).
// Tiled through shared memory like the statistics-free variant.
// Dynamic shared memory: roundUp4(blockDim.x) CoordType plus TypeType
// entries.  ptr_de is unused; kept for interface uniformity.
__global__ void
calNonBondedInteraction_all (const IndexType numAtom,
                             const CoordType * coord,
                             ScalorType * forcx,
                             ScalorType * forcy,
                             ScalorType * forcz,
                             const TypeType * type,
                             const RectangularBox box,
                             const ScalorType rcut,
                             ScalorType * statistic_nb_buff0,
                             ScalorType * statistic_nb_buff1,
                             ScalorType * statistic_nb_buff2,
                             ScalorType * statistic_nb_buff3,
                             mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType numberAtom = numAtom;
  // global index of the atom owned by this thread.
  IndexType ii = tid + bid * blockDim.x;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  // shared tile of target coordinates and types.
  extern __shared__ volatile char pub_sbuff[];
  volatile CoordType * target =
      (volatile CoordType *) pub_sbuff;
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  __syncthreads();
  CoordType ref;
  TypeType reftype;
  if (ii < numberAtom){
    ref = coord[ii];
    reftype = type[ii];
  }
  ScalorType rcut2 = rcut * rcut;
  for (IndexType targetBlockId = 0;
       targetBlockId * blockDim.x < numberAtom; ++targetBlockId){
    IndexType jj = tid + targetBlockId * blockDim.x;
    __syncthreads();
    // cooperatively stage the next tile in shared memory.
    if (jj < numberAtom){
      target[tid].x = coord[jj].x;
      target[tid].y = coord[jj].y;
      target[tid].z = coord[jj].z;
      targettype[tid] = type[jj];
    }
    __syncthreads();
    if (ii < numberAtom){
      for (IndexType kk = 0; kk < blockDim.x; ++kk){
        // the last tile may be only partially filled.
        if (kk + targetBlockId * blockDim.x >= numberAtom) break;
        ScalorType diffx = target[kk].x - ref.x;
        ScalorType diffy = target[kk].y - ref.y;
        ScalorType diffz = target[kk].z - ref.z;
        shortestImage (box, &diffx, &diffy, &diffz);
        ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz;
        // (the redundant second dr2 < rcut2 test has been removed.)
        if (dr2 < rcut2 &&
            kk + targetBlockId * blockDim.x != ii){
          IndexType fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[kk]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii < numberAtom){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    // each pair is counted from both partners, hence the 0.5 factor.
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// Twin-range correction from a cell list: accumulates forces,
// potential and diagonal virial for pairs whose distance r satisfies
// rcut1 <= r < rcut2.  Forces are STORED (=), not accumulated, into
// forcx/y/z; statistics are halved for pair double counting.  Launch
// layout: one thread block per cell; dynamic shared memory holds
// roundUp4(blockDim.x) entries each of IndexType, CoordType and
// TypeType.  numAtom and ptr_de are unused; kept for interface
// uniformity.
__global__ void
calTwinRangeCorrection_cell (const IndexType numAtom,
                             const CoordType * coord,
                             ScalorType * forcx,
                             ScalorType * forcy,
                             ScalorType * forcz,
                             const TypeType * type,
                             const RectangularBox box,
                             const DeviceCellList clist,
                             const ScalorType rcut1,
                             const ScalorType rcut2,
                             ScalorType * statistic_nb_buff0,
                             ScalorType * statistic_nb_buff1,
                             ScalorType * statistic_nb_buff2,
                             ScalorType * statistic_nb_buff3,
                             mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // index of the atom handled by this thread; MaxIndexValue marks an
  // empty slot of the cell.
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared buffers: indexes, coordinates and types of one target cell.
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rcut12 = rcut1 * rcut1;
  ScalorType rcut22 = rcut2 * rcut2;
  // Loop-invariant flags, hoisted out of the neighbor-cell loop (they
  // were recomputed every iteration): explicit minimum-image correction
  // is only needed along a direction with a single cell.
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    // cooperatively load the target cell into shared memory.
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the target cell for pairs inside the twin-range shell.
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
        if (dr2 < rcut22 && dr2 >= rcut12 &&
            targetIndexes[jj] != ii){
          IndexType fidx(0);
          fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[jj]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    // note: assignment (not +=); the correction overwrites these arrays.
    forcx[ii] = fsumx;
    forcy[ii] = fsumy;
    forcz[ii] = fsumz;
    // each pair is counted from both partners, hence the 0.5 factor.
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// Twin-range correction by direct all-pairs summation (O(N^2)).
// Tiles the atom array through shared memory; each thread accumulates
// force, potential and diagonal virial contributions of its own atom
// for every pair whose distance r lies in [rcut1, rcut2).  Forces are
// STORED (=), not accumulated, into forcx/y/z; statistics buffers get
// the half-sums (pair double counting).  Dynamic shared memory:
// roundUp4(blockDim.x) CoordType plus TypeType entries.  ptr_de is
// unused; kept for interface uniformity.
__global__ void
calTwinRangeCorrection_all (const IndexType numAtom,
                            const CoordType * coord,
                            ScalorType * forcx,
                            ScalorType * forcy,
                            ScalorType * forcz,
                            const TypeType * type,
                            const RectangularBox box,
                            const ScalorType rcut1,
                            const ScalorType rcut2,
                            ScalorType * statistic_nb_buff0,
                            ScalorType * statistic_nb_buff1,
                            ScalorType * statistic_nb_buff2,
                            ScalorType * statistic_nb_buff3,
                            mdError_t * ptr_de)
{
  const IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType tid = threadIdx.x;
  // global index of the atom owned by this thread.
  const IndexType myIdx = tid + bid * blockDim.x;
  ScalorType sumFx (0.f), sumFy (0.f), sumFz (0.f);
  ScalorType sumPoten (0.0f), sumVxx (0.0f), sumVyy (0.0f), sumVzz (0.0f);
  // shared tile of target coordinates and types.
  extern __shared__ volatile char pub_sbuff[];
  volatile CoordType * tileCoord =
      (volatile CoordType *) pub_sbuff;
  volatile TypeType * tileType =
      (volatile TypeType *) &tileCoord[roundUp4(blockDim.x)];
  __syncthreads();
  CoordType myCoord;
  TypeType myType;
  if (myIdx < numAtom){
    myCoord = coord[myIdx];
    myType = type[myIdx];
  }
  const ScalorType rcut12 = rcut1 * rcut1;
  const ScalorType rcut22 = rcut2 * rcut2;
  for (IndexType tb = 0; tb * blockDim.x < numAtom; ++tb){
    const IndexType loadIdx = tid + tb * blockDim.x;
    __syncthreads();
    // cooperatively stage the next tile in shared memory.
    if (loadIdx < numAtom){
      tileCoord[tid].x = coord[loadIdx].x;
      tileCoord[tid].y = coord[loadIdx].y;
      tileCoord[tid].z = coord[loadIdx].z;
      tileType[tid] = type[loadIdx];
    }
    __syncthreads();
    if (myIdx < numAtom){
      for (IndexType kk = 0; kk < blockDim.x; ++kk){
        const IndexType other = kk + tb * blockDim.x;
        // the last tile may be only partially filled.
        if (other >= numAtom) break;
        ScalorType dx = tileCoord[kk].x - myCoord.x;
        ScalorType dy = tileCoord[kk].y - myCoord.y;
        ScalorType dz = tileCoord[kk].z - myCoord.z;
        shortestImage (box, &dx, &dy, &dz);
        const ScalorType dr2 = dx*dx + dy*dy + dz*dz;
        // only pairs inside the twin-range shell contribute.
        if (dr2 < rcut22 && dr2 >= rcut12 && other != myIdx){
          const IndexType fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              myType,
              tileType[kk]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        dx, dy, dz,
                        &fx, &fy, &fz, &dp);
          sumPoten += dp;
          sumVxx += fx * dx;
          sumVyy += fy * dy;
          sumVzz += fz * dz;
          sumFx += fx;
          sumFy += fy;
          sumFz += fz;
        }
      }
    }
  }
  if (myIdx < numAtom){
    // assignment (not +=): the correction overwrites the force arrays.
    forcx[myIdx] = sumFx;
    forcy[myIdx] = sumFy;
    forcz[myIdx] = sumFz;
    // each pair is counted from both partners, hence the 0.5 factor.
    statistic_nb_buff0[myIdx] = sumPoten * 0.5f;
    statistic_nb_buff1[myIdx] = sumVxx * 0.5f;
    statistic_nb_buff2[myIdx] = sumVyy * 0.5f;
    statistic_nb_buff3[myIdx] = sumVzz * 0.5f;
  }
}
__global__ void
buildNeighborListCalTwinRangeCorr_cell (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const TypeType * type,
const RectangularBox box,
const DeviceCellList clist,
const ScalorType rcut1,
const ScalorType rcut2,
DeviceNeighborList nlist,
ScalorType * statistic_nb_buff0,
ScalorType * statistic_nb_buff1,
ScalorType * statistic_nb_buff2,
ScalorType * statistic_nb_buff3,
mdError_t * ptr_de)
{
// RectangularBoxGeometry::normalizeSystem (box, &ddata);
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType bidx, bidy, bidz;
D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
// set number of neighbor to 0
IndexType Nneighbor = 0;
// load index
IndexType ii = getDeviceCellListData (clist, bid, tid);
// load iith coordinate // use texturefetch instead
CoordType ref;
TypeType reftype;
ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
ref = coord[ii];
reftype = type[ii];
#else
ref = tex1Dfetch (global_texRef_interaction_coord, ii);
reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
}
// the target index and coordinates are shared
extern __shared__ volatile char pub_sbuff[];
volatile IndexType * targetIndexes =
(volatile IndexType *) pub_sbuff;
CoordType * target =
(CoordType *) &targetIndexes[roundUp4(blockDim.x)];
volatile TypeType * targettype =
(volatile TypeType *) &target[roundUp4(blockDim.x)];
ScalorType rcut12 = rcut1 * rcut1;
ScalorType rcut22 = rcut2 * rcut2;
for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
__syncthreads();
IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
CoordType shift;
shift.x = shiftNoi.x * box.size.x;
shift.y = shiftNoi.y * box.size.y;
shift.z = shiftNoi.z * box.size.z;
targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
if (targetIndexes[tid] != MaxIndexValue){
target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
}
bool oneCellX(false), oneCellY(false), oneCellZ(false);
if (clist.NCell.x == 1) oneCellX = true;
if (clist.NCell.y == 1) oneCellY = true;
if (clist.NCell.z == 1) oneCellZ = true;
__syncthreads();
// find neighbor
if (ii != MaxIndexValue){
for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
// if (targetIndexes[jj] == MaxIndexValue) break;
ScalorType diffx = target[jj].x - shift.x - ref.x;
ScalorType diffy = target[jj].y - shift.y - ref.y;
ScalorType diffz = target[jj].z - shift.z - ref.z;
if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
if (targetIndexes[jj] != ii){
if (dr2 < rcut22 && dr2 >= rcut12 ){
IndexType fidx(0);
fidx = AtomNBForceTable::calForceIndex (
const_nonBondedInteractionTable,
const_numAtomType[0],
reftype,
targettype[jj]);
// if (fidx != mdForceNULL) {
ScalorType fx, fy, fz, dp;
nbForcePoten (nonBondedInteractionType[fidx],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[fidx]],
diffx, diffy, diffz,
&fx, &fy, &fz, &dp);
// printf ("# %d\t%d\t%f\t%f\t%f\n",
// ii, targetIndexes[jj],
// ref.z, target[jj].z, fz);
myPoten += dp;
myVxx += fx * diffx;
myVyy += fy * diffy;
myVzz += fz * diffz;
fsumx += fx;
fsumy += fy;
fsumz += fz;
// }
}
else if (dr2 < rcut12){
IndexType fidx(0);
fidx = AtomNBForceTable::calForceIndex (
const_nonBondedInteractionTable,
const_numAtomType[0],
reftype,
targettype[jj]);
IndexType listIdx = Nneighbor * nlist.stride + ii;
nlist.data[listIdx] = targetIndexes[jj];
nlist.forceIndex[listIdx] = fidx;
Nneighbor ++;
}
}
}
}
}
if (ii != MaxIndexValue){
forcx[ii] = fsumx;
forcy[ii] = fsumy;
forcz[ii] = fsumz;
statistic_nb_buff0[ii] = myPoten * 0.5f;
statistic_nb_buff1[ii] = myVxx * 0.5f;
statistic_nb_buff2[ii] = myVyy * 0.5f;
statistic_nb_buff3[ii] = myVzz * 0.5f;
if (Nneighbor > nlist.listLength && ptr_de != NULL){
*ptr_de = mdErrorShortNeighborList;
}
nlist.Nneighbor[ii] = Nneighbor;
}
}
// All-pair (O(N^2)) combined kernel.  In a single sweep over every atom pair
// it
//   * accumulates force / potential / virial contributions from pairs in the
//     twin-range shell rcut1 <= r < rcut2, and
//   * records pairs with r < rcut1 into the neighbor list nlist.
// Launch: one thread per atom (1D grid may be folded into 2D blocks).
// Dynamic shared memory: roundUp4(blockDim.x) CoordType + blockDim.x TypeType
// elements, used to stage one block of "target" atoms at a time.
// Per-atom outputs: forcx/forcy/forcz and the four statistic buffers
// (potential, virial xx/yy/zz); each pair is visited from both of its atoms,
// hence the 0.5f factor on the statistics.
__global__ void
buildNeighborListCalTwinRangeCorr_all (const IndexType numAtom,
				       const CoordType * coord,
				       ScalorType * forcx,
				       ScalorType * forcy,
				       ScalorType * forcz,
				       const TypeType * type,
				       const RectangularBox box,
				       const ScalorType rcut1,
				       const ScalorType rcut2,
				       DeviceNeighborList nlist,
				       ScalorType * statistic_nb_buff0,
				       ScalorType * statistic_nb_buff1,
				       ScalorType * statistic_nb_buff2,
				       ScalorType * statistic_nb_buff3,
				       mdError_t * ptr_de)
{
  // RectangularBoxGeometry::normalizeSystem (box, &ddata);
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType numberAtom = numAtom;
  IndexType Nneighbor = 0;               // neighbors found so far for atom ii
  IndexType ii = tid + bid * blockDim.x; // global index of this thread's atom
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  // shared staging area: coordinates first, then types
  extern __shared__ volatile char pub_sbuff[];
  volatile CoordType * target =
      (volatile CoordType *) pub_sbuff;
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  __syncthreads();
  CoordType ref;
  TypeType reftype;
  if (ii < numberAtom){
    ref = coord[ii];
    reftype = type[ii];
  }
  ScalorType rcut12 = rcut1 * rcut1;
  ScalorType rcut22 = rcut2 * rcut2;
  // walk the atom array one thread-block-sized tile at a time
  for (IndexType targetBlockId = 0;
       targetBlockId * blockDim.x < numberAtom; ++targetBlockId){
    IndexType jj = tid + targetBlockId * blockDim.x;
    __syncthreads();
    // stage one tile of target atoms into shared memory
    if (jj < numberAtom){
      target[tid].x = coord[jj].x;
      target[tid].y = coord[jj].y;
      target[tid].z = coord[jj].z;
      targettype[tid] = type[jj];
    }
    __syncthreads();
    if (ii < numberAtom){
      for (IndexType kk = 0; kk < blockDim.x; ++kk){
	if (kk + targetBlockId * blockDim.x >= numberAtom) break;
	ScalorType diffx = target[kk].x - ref.x;
	ScalorType diffy = target[kk].y - ref.y;
	ScalorType diffz = target[kk].z - ref.z;
	// minimum-image convention
	shortestImage (box, &diffx, &diffy, &diffz);
	ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz;
	if (kk + targetBlockId * blockDim.x != ii){   // skip self interaction
	  if (dr2 < rcut22 && dr2 >= rcut12 ){
	    // twin-range shell: contributes force/energy/virial but is NOT
	    // recorded in the neighbor list
	    IndexType fidx = AtomNBForceTable::calForceIndex (
		const_nonBondedInteractionTable,
		const_numAtomType[0],
		reftype,
		targettype[kk]);
	    // if (fidx != mdForceNULL) {
	    ScalorType fx, fy, fz, dp;
	    nbForcePoten (nonBondedInteractionType[fidx],
			  &nonBondedInteractionParameter
			  [nonBondedInteractionParameterPosition[fidx]],
			  diffx, diffy, diffz,
			  &fx, &fy, &fz, &dp);
	    myPoten += dp;
	    myVxx += fx * diffx;
	    myVyy += fy * diffy;
	    myVzz += fz * diffz;
	    fsumx += fx;
	    fsumy += fy;
	    fsumz += fz;
	    // }
	  }
	  else if (dr2 < rcut12){
	    // inner shell: goes into the neighbor list
	    IndexType fidx = AtomNBForceTable::calForceIndex (
		const_nonBondedInteractionTable,
		const_numAtomType[0],
		reftype,
		targettype[kk]);
	    IndexType listIdx = Nneighbor * nlist.stride + ii;
	    nlist.data[listIdx] = kk + targetBlockId * blockDim.x;
	    nlist.forceIndex[listIdx] = fidx;
	    Nneighbor ++;
	  }
	}
      }
    }
  }
  if (ii < numberAtom){
    forcx[ii] = fsumx;
    forcy[ii] = fsumy;
    forcz[ii] = fsumz;
    // 0.5f: every pair was counted from both atoms
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
    // flag overflow of the fixed-length neighbor list
    if (Nneighbor > nlist.listLength && ptr_de != NULL){
      *ptr_de = mdErrorShortNeighborList;
    }
    nlist.Nneighbor[ii] = Nneighbor;
  }
}
// Widom test-particle insertion energy (NVT), cell-list version.
// One block per test particle: each thread scans one slot of every neighbor
// cell of the cell containing the test particle, summing the interaction
// energy with atoms of the SAME type within clist.rlist; the block-wide sum
// is reduced in shared memory and written to statistic_nb_buff0[bid].
// Dynamic shared memory: blockDim.x ScalorType elements (sumbuff).
// Note: atom data is read through the texture references, so the raw
// coord/type pointers and numAtom are not used in this kernel.
__global__ void
widomDeltaPoten_NVT (const IndexType numTestParticle,
		     const CoordType * coordTestParticle,
		     const TypeType * typeTestParticle,
		     const IndexType numAtom,
		     const CoordType * coord,
		     const TypeType * type,
		     const RectangularBox box,
		     DeviceCellList clist,
		     ScalorType * statistic_nb_buff0,
		     mdError_t * ptr_de)
{
  // RectangularBoxGeometry::normalizeSystem (box, &ddata);
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  // IndexType ii = tid + bid * blockDim.x;
  if (bid >= numTestParticle) return;
  // extern __shared__ volatile char pub_sbuff_widom[];
  // volatile ScalorType * sumbuff = (volatile ScalorType *) pub_sbuff_widom;
  extern __shared__ volatile ScalorType sumbuff [];
  CoordType refCoord = coordTestParticle[bid];
  TypeType refType = typeTestParticle[bid];
  ScalorType myPoten (0.0f);
  // locate the cell containing the test particle
  IndexType refCelli, refCellj, refCellk;
  refCelli = IndexType (refCoord.x * box.sizei.x * ScalorType(clist.NCell.x));
  refCellj = IndexType (refCoord.y * box.sizei.y * ScalorType(clist.NCell.y));
  refCellk = IndexType (refCoord.z * box.sizei.z * ScalorType(clist.NCell.z));
  // a coordinate exactly on the upper box face would index one past the last
  // cell; wrap it back and shift the coordinate accordingly
  if (refCelli == clist.NCell.x){
    refCelli -= clist.NCell.x;
    refCoord.x -= box.size.x;
  }
  if (refCellj == clist.NCell.y){
    refCellj -= clist.NCell.y;
    refCoord.y -= box.size.y;
  }
  if (refCellk == clist.NCell.z){
    refCellk -= clist.NCell.z;
    refCoord.z -= box.size.z;
  }
  IndexType refCellIndex = D3toD1 (clist.NCell, refCelli, refCellj, refCellk);
  // loop over all neighbor cells of the reference cell; thread tid reads
  // slot tid of each cell
  for (IndexType i = 0; i < clist.numNeighborCell[refCellIndex]; ++i){
    __syncthreads ();
    IndexType targetCellIdx = getNeighborCellIndex (clist, refCellIndex, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, refCellIndex, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    IndexType targetIndex = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndex != MaxIndexValue){
      TypeType targettype = tex1Dfetch(global_texRef_interaction_type, targetIndex);
      // only interactions with atoms of the same type as the test particle
      if (refType == targettype){
	CoordType targetCoord = tex1Dfetch(global_texRef_interaction_coord, targetIndex);
	ScalorType diffx = targetCoord.x - shift.x - refCoord.x;
	ScalorType diffy = targetCoord.y - shift.y - refCoord.y;
	ScalorType diffz = targetCoord.z - shift.z - refCoord.z;
	ScalorType dr2 = ((diffx*diffx+diffy*diffy+diffz*diffz));
	// dr2 > 1e-4 discards essentially zero separations (overlaps)
	if (dr2 < clist.rlist*clist.rlist && dr2 > 1e-4){
	  IndexType fidx(0);
	  ScalorType dp;
	  fidx = AtomNBForceTable::
	      calForceIndex (const_nonBondedInteractionTable,
			     const_numAtomType[0],
			     refType,
			     refType);
	  nbPoten (nonBondedInteractionType[fidx],
		   &nonBondedInteractionParameter
		   [nonBondedInteractionParameterPosition[fidx]],
		   diffx, diffy, diffz, &dp);
	  myPoten += dp;
	  // printf ("dp: %f, %f %f %f\n", dp, diffx, diffy, diffz);
	}
      }
    }
  }
  // block-wide reduction of the per-thread partial energies
  sumbuff[tid] = myPoten;
  __syncthreads();
  sumVectorBlockBuffer_2 (sumbuff);
  __syncthreads();
  if (tid == 0){
    statistic_nb_buff0[bid] = sumbuff[0];
  }
}
// if (tid == 0){
// // printf ("### du is %f\n", sumbuff[0]);
// statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature);
// }
// }
// Widom test-particle insertion energy (NVT), all-pair version.
// One block per test particle; threads stride over all atoms, summing the
// interaction energy with atoms of the SAME type within rlist, then reduce
// the block-wide sum into statistic_nb_buff0[bid].
// Dynamic shared memory: blockDim.x ScalorType elements (sumbuff).
__global__ void
widomDeltaPoten_allPair_NVT (const IndexType numTestParticle,
			     const CoordType * coordTestParticle,
			     const TypeType * typeTestParticle,
			     const IndexType numAtom,
			     const CoordType * coord,
			     const TypeType * type,
			     const RectangularBox box,
			     const ScalorType rlist,
			     ScalorType * statistic_nb_buff0,
			     mdError_t * ptr_de)
{
  // RectangularBoxGeometry::normalizeSystem (box, &ddata);
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  // IndexType ii = tid + bid * blockDim.x;
  if (bid >= numTestParticle) return;
  CoordType refCoord = coordTestParticle[bid];
  TypeType refType = typeTestParticle[bid];
  ScalorType myPoten = 0.;
  extern __shared__ volatile ScalorType sumbuff [];
  // block-strided loop over all atoms; thread tid handles atom start+tid
  for (IndexType start = 0; start < numAtom; start += blockDim.x){
    IndexType targetIndex = start + tid;
    if (targetIndex >= numAtom) break;
    TypeType targetType = type[targetIndex];
    // only interactions with atoms of the same type as the test particle
    if (targetType != refType) continue;
    CoordType targetCoord = coord[targetIndex];
    ScalorType diffx = targetCoord.x - refCoord.x;
    ScalorType diffy = targetCoord.y - refCoord.y;
    ScalorType diffz = targetCoord.z - refCoord.z;
    // minimum-image convention
    RectangularBoxGeometry::shortestImage (box, &diffx, &diffy, &diffz);
    ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
    // dr2 > 1e-4 discards essentially zero separations (overlaps)
    if (dr2 < rlist * rlist && dr2 > 1e-4 ){
      IndexType fidx(0);
      ScalorType dp;
      fidx = AtomNBForceTable::
	  calForceIndex (const_nonBondedInteractionTable,
			 const_numAtomType[0],
			 refType,
			 refType);
      nbPoten (nonBondedInteractionType[fidx],
	       &nonBondedInteractionParameter
	       [nonBondedInteractionParameterPosition[fidx]],
	       diffx, diffy, diffz, &dp);
      myPoten += dp;
    }
  }
  // block-wide reduction of the per-thread partial energies
  sumbuff[tid] = myPoten;
  __syncthreads();
  sumVectorBlockBuffer_2 (sumbuff);
  __syncthreads();
  if (tid == 0){
    statistic_nb_buff0[bid] = sumbuff[0];
  }
}
// if (tid == 0){
// // printf ("### du is %f\n", sumbuff[0]);
// statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature);
// }
// }
| bd273f4d7ee62fb3f5dedff8430c22b72a99cc97.cu | #define DEVICE_CODE
#include "systemDefines.h"
#include "InteractionEngine_interface.h"
#include "NonBondedInteraction.h"
#include "BondInteraction.h"
#include "AngleInteraction.h"
// #include "CellList_interface.h"
#include "Auxiliary.h"
// Texture references over the per-atom coordinate and type arrays; bound in
// InteractionEngine::init for cached reads inside the interaction kernels.
texture<CoordType, 1, cudaReadModeElementType> global_texRef_interaction_coord;
texture<TypeType , 1, cudaReadModeElementType> global_texRef_interaction_type;
// Constant-memory tables for the non-bonded interactions: per-interaction
// type tags, a flat parameter pool, and per-interaction offsets into it.
// Filled by InteractionEngine::registNonBondedInteraction.
__constant__
InteractionType nonBondedInteractionType [MaxNumberNonBondedInteraction];
__constant__
ScalorType nonBondedInteractionParameter [MaxNumberNonBondedInteractionParameter];
__constant__
IndexType nonBondedInteractionParameterPosition [MaxNumberNonBondedInteraction];
// Same layout for the bonded (bond / angle) interactions; filled by
// InteractionEngine::registBondedInteraction.
__constant__
InteractionType bondedInteractionType [MaxNumberBondedInteraction];
__constant__
IndexType bondedInteractionParameterPosition [MaxNumberBondedInteraction];
__constant__
ScalorType bondedInteractionParameter [MaxNumberBondedInteractionParamemter];
// Atom-type-pair -> interaction-index lookup table and its bookkeeping.
__constant__
IndexType const_nonBondedInteractionTableLength[1];
__constant__
IndexType const_numAtomType[1];
__constant__
IndexType const_nonBondedInteractionTable [MaxLengthNonBondedInteractionTable];
// Prepare the engine for a given system: launch geometry, texture bindings
// for the coordinate/type arrays, and the summation buffers and streams used
// by the statistic-collecting kernels.
void InteractionEngine::init (const MDSystem & sys,
			      const IndexType & NTread)
{
  hasBond  = false;
  hasAngle = false;
  // one-dimensional thread blocks of NTread threads
  myBlockDim.x = NTread;
  myBlockDim.y = 1;
  myBlockDim.z = 1;
  // ceiling division: enough blocks to cover every atom
  IndexType numBlock = sys.ddata.numAtom / myBlockDim.x;
  if (sys.ddata.numAtom % myBlockDim.x != 0) numBlock ++;
  atomGridDim = toGridDim (numBlock);
  // bind coordinates and types to textures for cached reads in the kernels
  cudaBindTexture(0, global_texRef_interaction_coord, sys.ddata.coord,
		  sizeof(CoordType) * sys.ddata.numMem);
  cudaBindTexture(0, global_texRef_interaction_type, sys.ddata.type,
		  sizeof(TypeType) * sys.ddata.numMem);
  checkCUDAError ("InteractionEngine::init, bind texture");
  // per-atom buffers for the non-bonded statistics ...
  sum_nb_p.reinit   (sys.ddata.numAtom, NThreadForSum);
  sum_nb_vxx.reinit (sys.ddata.numAtom, NThreadForSum);
  sum_nb_vyy.reinit (sys.ddata.numAtom, NThreadForSum);
  sum_nb_vzz.reinit (sys.ddata.numAtom, NThreadForSum);
  // ... and per-block buffers for the bonded and angle statistics
  sum_b_p.reinit   (numBlock, NThreadForSum);
  sum_b_vxx.reinit (numBlock, NThreadForSum);
  sum_b_vyy.reinit (numBlock, NThreadForSum);
  sum_b_vzz.reinit (numBlock, NThreadForSum);
  sum_angle_p.reinit   (numBlock, NThreadForSum);
  sum_angle_vxx.reinit (numBlock, NThreadForSum);
  sum_angle_vyy.reinit (numBlock, NThreadForSum);
  sum_angle_vzz.reinit (numBlock, NThreadForSum);
  // streams used to overlap the independent statistic reductions
  for (IndexType i = 0; i < 8; ++i){
    cudaStreamCreate (&sum_stream[i]);
  }
  checkCUDAError ("InteractionEngine::init init sum statistic");
  // exclusion-list staging is configured later in registNonBondedInteraction
  maxNumExclusion = 0;
  sharedExclusionList = false;
  exclusion_sbuffSize = size_t(0);
}
// Round x up to the next multiple of 4 (host-side mirror of the device-side
// roundUp4 used to lay out shared-memory buffers).
// BUG FIX: the original condition read "x & 3 == 0", which C++ parses as
// "x & (3 == 0)" == "x & 0" and is therefore always false, so multiples of 4
// were bumped to x + 4 instead of being returned unchanged.
// NOTE(review): this must stay consistent with the device-side roundUp4 that
// computes offsets into the shared buffers sized with this function — confirm
// that roundUp4 returns x (not x + 4) for multiples of 4 as well.
static IndexType hroundUp4 (const IndexType x)
{
  if ((x & 3) == 0){
    return x;
  }
  else {
    return ((x >> 2) + 1) << 2;
  }
}
// Upload the non-bonded interaction description (per-interaction types, the
// flat parameter pool with its offsets, and the atom-type-pair lookup table)
// to constant memory, record the host-side tail corrections, and decide
// whether the exclusion list fits in shared memory.
// Throws MDExcptUnbuiltNonBondedInteraction / MDExcptExceedConstantMemLimit.
void InteractionEngine::
registNonBondedInteraction (const SystemNonBondedInteraction & sysNbInter)
{
  if (! sysNbInter.beBuilt()) {
    throw MDExcptUnbuiltNonBondedInteraction ("InteractionEngine");
  }
  // BUG FIX: the capacity check compared against MaxNumberBondedInteraction,
  // but the constant arrays filled below are sized
  // MaxNumberNonBondedInteraction.
  if (sysNbInter.numberOfInteraction() > MaxNumberNonBondedInteraction ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registNonBondedInteraction",
	"nonBonedInteractionType",
	MaxNumberNonBondedInteraction * sizeof(InteractionType));
  }
  if (sysNbInter.numberOfParameter() > MaxNumberNonBondedInteractionParameter ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registNonBondedInteraction",
	"nonBondedInteractionParameter",
	MaxNumberNonBondedInteractionParameter * sizeof(ScalorType));
  }
  cudaMemcpyToSymbol (nonBondedInteractionType,
		      sysNbInter.interactionType(),
		      sizeof(InteractionType) * sysNbInter.numberOfInteraction());
  // BUG FIX: the element sizes of the next two copies were swapped —
  // parameter positions are IndexType, parameters are ScalorType.
  cudaMemcpyToSymbol (nonBondedInteractionParameterPosition,
		      sysNbInter.interactionParameterPosition(),
		      sizeof(IndexType) * sysNbInter.numberOfInteraction());
  cudaMemcpyToSymbol (nonBondedInteractionParameter,
		      sysNbInter.interactionParameter(),
		      sizeof(ScalorType) * sysNbInter.numberOfParameter());
  checkCUDAError ("InteractionEngine::init, init NB force setting");
  // atom-type-pair -> interaction-index lookup table
  IndexType tableSize = sysNbInter.interactionTableSize();
  IndexType tmpNumAtomType = sysNbInter.numberOfAtomTypes();
  if (tableSize > MaxLengthNonBondedInteractionTable){
    throw MDExcptExceedConstantMemLimit(
	"InteractionEngine::registNonBondedInteraction",
	"nonBondedInteractionTable",
	MaxLengthNonBondedInteractionTable * sizeof (ScalorType));
  }
  cudaMemcpyToSymbol (const_nonBondedInteractionTableLength,
		      &tableSize,
		      sizeof (IndexType));
  checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTableLength");
  cudaMemcpyToSymbol (const_numAtomType,
		      &tmpNumAtomType,
		      sizeof (IndexType));
  checkCUDAError ("InteractionEngine::init, const_numAtomType");
  cudaMemcpyToSymbol (const_nonBondedInteractionTable,
		      sysNbInter.interactionTable(),
		      sizeof (IndexType) * tableSize);
  checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTable");
  // long-range energy / pressure tail corrections, applied on the host side
  energyCorr = sysNbInter.energyCorrection ();
  pressureCorr = sysNbInter.pressureCorrection ();
  // stage the exclusion list in shared memory only if it fits
  maxNumExclusion = sysNbInter.maxNumberOfExclusion();
  if (maxNumExclusion != 0){
    sharedExclusionList = true;
    exclusion_sbuffSize = myBlockDim.x * maxNumExclusion * sizeof(IndexType);
    if (exclusion_sbuffSize > SystemSharedBuffSize){
      sharedExclusionList = false;
    }
  }
}
// Record which bonded terms (bonds / angles) are present and upload the
// bonded interaction tables to constant memory; also size the shared-memory
// reduction buffers used by the bond/angle statistic kernels.
// Throws MDExcptExceedConstantMemLimit when the tables do not fit.
void InteractionEngine::
registBondedInteraction (const SystemBondedInteraction & sysBdInter)
{
  if (sysBdInter.hasBond() ){
    hasBond = true;
  }
  if (sysBdInter.hasAngle()){
    hasAngle = true;
  }
  if (sysBdInter.numberOfInteraction() > MaxNumberBondedInteraction ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registBondedInteraction",
	"bondedInteractionType",
	MaxNumberBondedInteraction * sizeof(InteractionType));
  }
  if (sysBdInter.numberOfParameter() > MaxNumberBondedInteractionParamemter ){
    throw MDExcptExceedConstantMemLimit (
	"InteractionEngine::registBondedInteraction",
	"bondedInteractionParameter",
	MaxNumberBondedInteractionParamemter * sizeof(ScalorType));
  }
  if (hasBond || hasAngle){
    cudaMemcpyToSymbol (bondedInteractionType,
			sysBdInter.interactionType(),
			sizeof(InteractionType) * sysBdInter.numberOfInteraction());
    // BUG FIX: the element sizes of the next two copies were swapped —
    // parameter positions are IndexType, parameters are ScalorType.
    cudaMemcpyToSymbol (bondedInteractionParameterPosition,
			sysBdInter.interactionParameterPosition(),
			sizeof(IndexType) * sysBdInter.numberOfInteraction());
    cudaMemcpyToSymbol (bondedInteractionParameter,
			sysBdInter.interactionParameter(),
			sizeof(ScalorType) * sysBdInter.numberOfParameter());
    checkCUDAError ("InteractionEngine::init, init bond force setting");
    // one ScalorType per thread for the in-block statistic reductions
    calBondInteraction_sbuffSize  = myBlockDim.x * sizeof(ScalorType);
    calAngleInteraction_sbuffSize = myBlockDim.x * sizeof(ScalorType);
  }
}
// Release the texture bindings and tear down the summation streams that
// were created in init().
InteractionEngine::~InteractionEngine()
{
  cudaUnbindTexture(global_texRef_interaction_coord);
  cudaUnbindTexture(global_texRef_interaction_type);
  IndexType i = 0;
  while (i < 8){
    cudaStreamDestroy (sum_stream[i]);
    ++i;
  }
}
// Zero the per-atom force accumulators before a new force evaluation pass.
void InteractionEngine::clearInteraction (MDSystem & sys)
{
  clearForce
      <<<atomGridDim, myBlockDim>>> (
	  sys.ddata.numAtom,
	  sys.ddata.forcx,
	  sys.ddata.forcy,
	  sys.ddata.forcz);
  checkCUDAError ("InteractionEngine::clearInteraction");
}
// Store the precomputed long-range energy and pressure tail corrections into
// the statistic array.  Must be launched with a single thread: <<<1, 1>>>.
__global__ void
applyEnergyPressureCorrection (ScalorType * ddata,
			       ScalorType energyCorr,
			       ScalorType pressureCorr)
{
  // the two stores are independent; order is irrelevant
  ddata[mdStatisticPressureCorrection] = pressureCorr;
  ddata[mdStatisticEnergyCorrection]   = energyCorr;
}
// Evaluate non-bonded forces from the neighbor list (no statistic
// collection).  When excllist is non-NULL an exclusion-aware kernel overload
// is launched, optionally staging the exclusion list in shared memory
// (exclusion_sbuffSize / sharedExclusionList set in
// registNonBondedInteraction).
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const NeighborList & nlist,
			   const ExclusionList * excllist,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNonBondedInteraction);
  if (excllist == NULL){
    calNonBondedInteraction_neighbor
	<<<atomGridDim, myBlockDim>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.ddata.rcut,
	    sys.box,
	    nlist.dnlist);
  }
  else{
    // exclusion-aware overload; third launch argument is the dynamic
    // shared-memory size for the staged exclusion list
    calNonBondedInteraction_neighbor
	<<<atomGridDim, myBlockDim,
	exclusion_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    nlist.dnlist,
	    excllist->dexcllist,
	    sharedExclusionList
	    );
  }
  checkCUDAError ("InteractionEngine::applyInteraction nb");
  err.check ("interaction engine nb");
  if (timer != NULL) timer->toc(mdTimeNonBondedInteraction);
}
// void InteractionEngine::
// applyNonBondedInteraction (MDSystem & sys,
// const CellList & clist,
// const ScalorType & rcut,
// NeighborList & nlist,
// MDTimer *timer )
// {
// if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
// size_t applyNonBondedInteraction_CellList_sbuffSize =
// (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
// hroundUp4(clist.getBlockDim().x);
// // sizeof(IndexType) * hroundUp4(myBlockDim.x) +
// // sizeof(CoordType) * hroundUp4(myBlockDim.x) +
// // sizeof(TypeType) * hroundUp4(myBlockDim.x);
// calNonBondedInteraction
// <<<clist.getCellGrimDim(), clist.getBlockDim(),
// applyNonBondedInteraction_CellList_sbuffSize>>> (
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.forcx,
// sys.ddata.forcy,
// sys.ddata.forcz,
// sys.ddata.type,
// sys.box,
// clist.dclist,
// rcut,
// nlist.dnlist,
// err.ptr_de);
// checkCUDAError ("InteractionEngine::applyInteraction nb");
// err.check ("interaction engine nb");
// if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
// }
// Evaluate non-bonded forces from the neighbor list AND accumulate the
// potential / virial statistics into st, including the long-range tail
// corrections (scaled by the current box volume).
// NOTE(review): cudaThreadSynchronize() is the deprecated spelling of
// cudaDeviceSynchronize() in modern CUDA.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const NeighborList & nlist,
			   MDStatistic & st,
			   const ExclusionList * excllist,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
  if (excllist == NULL){
    calNonBondedInteraction_neighbor
	<<<atomGridDim, myBlockDim>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.ddata.rcut,
	    sys.box,
	    nlist.dnlist
	    ,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff
	    );
  }
  else {
    // exclusion-aware overload; dynamic shared memory holds the staged
    // exclusion list
    calNonBondedInteraction_neighbor
	<<<atomGridDim, myBlockDim,
	exclusion_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    nlist.dnlist
	    ,
	    excllist->dexcllist,
	    sharedExclusionList,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff
	    );
  }
  checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
  err.check ("interaction engine nb");
  cudaThreadSynchronize();
  // reduce the per-atom partial sums into the statistic array
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  // tail corrections scale with 1/V (energy) and 1/V^2 (pressure)
  ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
  volumei = 1.f / volumei;
  // printf ("apply Ec %f, Pc %f\n",
  // 	  energyCorr * volumei,
  // 	  pressureCorr * volumei * volumei);
  applyEnergyPressureCorrection
      <<<1, 1, 0>>> (st.ddata,
		     energyCorr * volumei,
		     pressureCorr * volumei * volumei);
  cudaThreadSynchronize();
  if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
}
// Evaluate non-bonded forces directly from the cell list (no neighbor list,
// no statistics).  Falls back to the all-pair overload when the cell list
// has not been built.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const CellList & clist,
			   const ScalorType & rcut,
			   MDTimer *timer )
{
  if (!clist.isempty()){
    if (timer != NULL) timer->tic(mdTimeNonBondedInteraction);
    // dynamic shared memory: per-thread index + coordinate + type staging
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    calNonBondedInteraction_cell
	<<<clist.getCellGrimDim(), clist.getCellBlockDim(),
	applyNonBondedInteraction_CellList_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut,
	    err.ptr_de);
    checkCUDAError ("InteractionEngine::applyInteraction nb");
    err.check ("interaction engine nb");
    if (timer != NULL) timer->toc(mdTimeNonBondedInteraction);
  }
  else {
    // no cell list available: use the O(N^2) all-pair path
    applyNonBondedInteraction (sys, rcut, timer);
  }
}
// Evaluate non-bonded forces from the cell list AND accumulate potential /
// virial statistics into st, including the volume-scaled tail corrections.
// Falls back to the all-pair overload when the cell list has not been built.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const CellList & clist,
			   const ScalorType & rcut,
			   MDStatistic & st,
			   MDTimer *timer )
{
  if (!clist.isempty()){
    if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
    // dynamic shared memory: per-thread index + coordinate + type staging
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    calNonBondedInteraction_cell
	<<<clist.getCellGrimDim(), clist.getCellBlockDim(),
	applyNonBondedInteraction_CellList_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,
	    sys.ddata.forcy,
	    sys.ddata.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de
	    );
    checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
    err.check ("interaction engine nb");
    cudaThreadSynchronize();
    // reduce the per-atom partial sums into the statistic array
    sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
    sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
    sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
    sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
    // tail corrections scale with 1/V (energy) and 1/V^2 (pressure)
    ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
    volumei = 1.f / volumei;
    // printf ("apply Ec %f, Pc %f\n",
    // 	    energyCorr * volumei,
    // 	    pressureCorr * volumei * volumei);
    applyEnergyPressureCorrection
	<<<1, 1, 0>>> (st.ddata,
		       energyCorr * volumei,
		       pressureCorr * volumei * volumei);
    cudaThreadSynchronize();
    if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
  }
  else {
    // no cell list available: use the O(N^2) all-pair path
    applyNonBondedInteraction (sys, rcut, st, timer);
  }
}
// Evaluate non-bonded forces with the O(N^2) all-pair kernel (no cell or
// neighbor list, no statistics).
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const ScalorType & rcut,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNonBondedInteraction);
  // dynamic shared memory: coordinate + type staging for one block of atoms
  size_t applyNonBondedInteraction_AllPair_sbuffSize =
      (sizeof(CoordType) + sizeof(TypeType)) *
      hroundUp4(myBlockDim.x);
  calNonBondedInteraction_all
      <<<atomGridDim, myBlockDim,
      applyNonBondedInteraction_AllPair_sbuffSize>>> (
	  sys.ddata.numAtom,
	  sys.ddata.coord,
	  sys.ddata.forcx,
	  sys.ddata.forcy,
	  sys.ddata.forcz,
	  sys.ddata.type,
	  sys.box,
	  rcut,
	  err.ptr_de);
  checkCUDAError ("InteractionEngine::applyInteraction nb");
  err.check ("interaction engine nb");
  if (timer != NULL) timer->toc(mdTimeNonBondedInteraction);
}
// Evaluate non-bonded forces with the O(N^2) all-pair kernel AND accumulate
// potential / virial statistics into st, including the volume-scaled tail
// corrections.
void InteractionEngine::
applyNonBondedInteraction (MDSystem & sys,
			   const ScalorType & rcut,
			   MDStatistic & st,
			   MDTimer *timer )
{
  if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
  // dynamic shared memory: coordinate + type staging for one block of atoms
  size_t applyNonBondedInteraction_AllPair_sbuffSize =
      (sizeof(CoordType) + sizeof(TypeType)) *
      hroundUp4(myBlockDim.x);
  calNonBondedInteraction_all
      <<<atomGridDim, myBlockDim,
      applyNonBondedInteraction_AllPair_sbuffSize>>> (
	  sys.ddata.numAtom,
	  sys.ddata.coord,
	  sys.ddata.forcx,
	  sys.ddata.forcy,
	  sys.ddata.forcz,
	  sys.ddata.type,
	  sys.box,
	  rcut,
	  sum_nb_p.buff,
	  sum_nb_vxx.buff,
	  sum_nb_vyy.buff,
	  sum_nb_vzz.buff,
	  err.ptr_de);
  checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
  err.check ("interaction engine nb");
  cudaThreadSynchronize();
  // reduce the per-atom partial sums into the statistic array
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  // tail corrections scale with 1/V (energy) and 1/V^2 (pressure)
  ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
  volumei = 1.f / volumei;
  // printf ("apply Ec %f, Pc %f\n",
  // 	  energyCorr * volumei,
  // 	  pressureCorr * volumei * volumei);
  applyEnergyPressureCorrection
      <<<1, 1, 0>>> (st.ddata,
		     energyCorr * volumei,
		     pressureCorr * volumei * volumei);
  cudaThreadSynchronize();
  if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
}
// void InteractionEngine::
// applyNonBondedInteraction (MDSystem & sys,
// const CellList & clist,
// const ScalorType & rcut,
// NeighborList & nlist,
// MDStatistic & st,
// MDTimer *timer )
// {
// if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
// if (!clist.isempty()){
// size_t applyNonBondedInteraction_CellList_sbuffSize =
// (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
// hroundUp4(clist.getBlockDim().x);
// calNonBondedInteraction
// <<<clist.getCellGrimDim(), clist.getBlockDim(),
// applyNonBondedInteraction_CellList_sbuffSize>>> (
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.forcx,
// sys.ddata.forcy,
// sys.ddata.forcz,
// sys.ddata.type,
// sys.box,
// clist.dclist,
// rcut,
// nlist.dnlist,
// sum_nb_p.buff,
// sum_nb_vxx.buff,
// sum_nb_vyy.buff,
// sum_nb_vzz.buff,
// err.ptr_de
// );
// }
// checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)");
// err.check ("interaction engine nb");
// cudaThreadSynchronize();
// sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
// sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX, 1);
// sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY, 2);
// sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ, 3);
// ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z;
// volumei = 1.f / volumei;
// // printf ("apply Ec %f, Pc %f\n",
// // energyCorr * volumei,
// // pressureCorr * volumei * volumei);
// applyEnergyPressureCorrection
// <<<1, 1, 0, 4>>> (st.ddata,
// energyCorr * volumei,
// pressureCorr * volumei * volumei);
// cudaThreadSynchronize();
// if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
// }
// Compute the twin-range (rcut1 <= r < rcut2) force contribution into twrec
// and record the matching energy / pressure corrections.  Chooses the
// all-pair or cell-list kernel depending on whether the cell list is built.
// The per-atom partial sums are reduced through a temporary MDStatistic to
// obtain the scalar corrections stored in twrec.
void InteractionEngine::
calTwinRangeCorrection (const MDSystem & sys,
			const CellList & clist,
			const ScalorType & rcut1,
			const ScalorType & rcut2,
			TwinRangeCorrectionRecorder & twrec,
			MDTimer * timer)
{
  if (timer != NULL) timer->tic(mdTimeNBInterTwinRange);
  if (clist.isempty()){
    // all-pair path: shared memory stages coordinates + types per block
    size_t applyNonBondedInteraction_AllPair_sbuffSize =
	(sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(myBlockDim.x);
    calTwinRangeCorrection_all
	<<<atomGridDim, myBlockDim,
	applyNonBondedInteraction_AllPair_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    rcut1,
	    rcut2,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  else {
    // cell-list path: shared memory also stages the per-thread atom indexes
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    calTwinRangeCorrection_cell
	<<<clist.getCellGrimDim(), clist.getCellBlockDim(),
	applyNonBondedInteraction_CellList_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut1,
	    rcut2,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  checkCUDAError ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  cudaThreadSynchronize();
  // reduce per-atom sums via a scratch statistic, then read back on host
  MDStatistic st (sys);
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  cudaThreadSynchronize();
  st.updateHost ();
  twrec.energyCorrection() = st.nonBondedEnergy();
  twrec.pressureCorrection() = st.pressure(sys.box);
  if (timer != NULL) timer->toc(mdTimeNBInterTwinRange);
}
// Combined pass: (re)build the neighbor list for pairs within rcut1 AND
// compute the twin-range (rcut1 <= r < rcut2) correction forces into twrec
// in the same sweep.  Chooses the all-pair or cell-list kernel depending on
// whether the cell list is built; the per-atom partial sums are reduced
// through a temporary MDStatistic into twrec's scalar corrections.
void InteractionEngine::
buildNeighborListCalTwinRangeCorrection (const MDSystem & sys,
					 const CellList & clist,
					 const ScalorType & rcut1,
					 const ScalorType & rcut2,
					 NeighborList & nlist,
					 TwinRangeCorrectionRecorder & twrec,
					 MDTimer * timer)
{
  if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
  if (clist.isempty()){
    // all-pair path: shared memory stages coordinates + types per block
    size_t applyNonBondedInteraction_AllPair_sbuffSize =
	(sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(myBlockDim.x);
    buildNeighborListCalTwinRangeCorr_all
	<<<atomGridDim, myBlockDim,
	applyNonBondedInteraction_AllPair_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    rcut1,
	    rcut2,
	    nlist.dnlist,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  else {
    // cell-list path: shared memory also stages the per-thread atom indexes
    size_t applyNonBondedInteraction_CellList_sbuffSize =
	(sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) *
	hroundUp4(clist.getCellBlockDim().x);
    buildNeighborListCalTwinRangeCorr_cell
	<<<clist.getCellGrimDim(), clist.getCellBlockDim(),
	applyNonBondedInteraction_CellList_sbuffSize>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    twrec.forcx,
	    twrec.forcy,
	    twrec.forcz,
	    sys.ddata.type,
	    sys.box,
	    clist.dclist,
	    rcut1,
	    rcut2,
	    nlist.dnlist,
	    sum_nb_p.buff,
	    sum_nb_vxx.buff,
	    sum_nb_vyy.buff,
	    sum_nb_vzz.buff,
	    err.ptr_de);
  }
  checkCUDAError ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection");
  cudaThreadSynchronize();
  // reduce per-atom sums via a scratch statistic, then read back on host
  MDStatistic st (sys);
  sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0);
  sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
  sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
  sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
  cudaThreadSynchronize();
  st.updateHost ();
  twrec.energyCorrection() = st.nonBondedEnergy();
  twrec.pressureCorrection() = st.pressure(sys.box);
  if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
}
// Evaluate bonded interactions (bonds and/or angles, as registered in
// registBondedInteraction) from the bonded interaction lists; no statistic
// collection.  Each present term launches its own kernel over all atoms.
void InteractionEngine::
applyBondedInteraction (MDSystem & sys,
			const BondedInteractionList & bdlist,
			MDTimer *timer )
{
  if (hasBond) {
    if (timer != NULL) timer->tic(mdTimeBondedInteraction);
    calBondInteraction
	<<<atomGridDim, myBlockDim>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,  sys.ddata.forcy,  sys.ddata.forcz,
	    sys.box,
	    bdlist.dbondlist);
    checkCUDAError ("InteractionEngine::applyInteraction bonded");
    err.check ("interaction engine b");
    if (timer != NULL) timer->toc(mdTimeBondedInteraction);
  }
  if (hasAngle){
    if (timer != NULL) timer->tic(mdTimeAngleInteraction);
    calAngleInteraction
	<<<atomGridDim, myBlockDim>>> (
	    sys.ddata.numAtom,
	    sys.ddata.coord,
	    sys.ddata.forcx,  sys.ddata.forcy,  sys.ddata.forcz,
	    sys.box,
	    bdlist.danglelist);
    checkCUDAError ("InteractionEngine::applyInteraction angle");
    err.check ("interaction engine angle");
    if (timer != NULL) timer->toc(mdTimeAngleInteraction);
  }
}
// Computes bonded (2-body bond and 3-body angle) forces and, in the same
// pass, accumulates the bonded potential energy and the diagonal virial
// components (xx, yy, zz) into the statistic object `st` through the
// per-block reduction buffers sum_b_* / sum_angle_*.
//
// sys    : MD system holding coordinates, forces and the simulation box.
// bdlist : device-side bond and angle topology lists.
// st     : statistic accumulator receiving potential and virial terms.
// timer  : optional profiling timer (may be NULL).
//
// Fix: the deprecated cudaThreadSynchronize() (removed from current CUDA
// toolkits) is replaced with cudaDeviceSynchronize(), which has identical
// semantics.  No other behavior changes.
void InteractionEngine::
applyBondedInteraction (MDSystem & sys,
                        const BondedInteractionList & bdlist,
                        MDStatistic & st,
                        MDTimer *timer)
{
  if (hasBond) {
    if (timer != NULL) timer->tic(mdTimeBInterStatistic);
    // bond forces plus per-block partial sums of energy / virial
    calBondInteraction
        <<<atomGridDim, myBlockDim,
        calBondInteraction_sbuffSize>>> (
            sys.ddata.numAtom,
            sys.ddata.coord,
            sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz,
            sys.box,
            bdlist.dbondlist
            ,
            sum_b_p.buff,
            sum_b_vxx.buff,
            sum_b_vyy.buff,
            sum_b_vzz.buff,
            err.ptr_de
            );
    checkCUDAError ("InteractionEngine::applyInteraction bonded (with statistic)");
    err.check ("interaction engine");
    if (timer != NULL) timer->toc(mdTimeBInterStatistic);
  }
  if (hasBond) {
    if (timer != NULL) timer->tic(mdTimeBInterStatistic);
    cudaDeviceSynchronize();
    // fold the per-block partial sums into the statistic object
    sum_b_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential);
    sum_b_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
    sum_b_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
    sum_b_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
    cudaDeviceSynchronize();
    if (timer != NULL) timer->toc(mdTimeBInterStatistic);
    checkCUDAError ("InteractionEngine::applyInteraction sum bond statistic (with statistic)");
  }
  if (hasAngle){
    if (timer != NULL) timer->tic(mdTimeAngleInterStatistic);
    // angle forces plus per-block partial sums of energy / virial
    calAngleInteraction
        <<<atomGridDim, myBlockDim,
        calAngleInteraction_sbuffSize>>> (
            sys.ddata.numAtom,
            sys.ddata.coord,
            sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz,
            sys.box,
            bdlist.danglelist,
            sum_angle_p.buff,
            sum_angle_vxx.buff,
            sum_angle_vyy.buff,
            sum_angle_vzz.buff,
            err.ptr_de);
    checkCUDAError ("InteractionEngine::applyInteraction angle");
    err.check ("interaction engine angle");
    if (timer != NULL) timer->toc(mdTimeAngleInterStatistic);
  }
  if (hasAngle){
    if (timer != NULL) timer->tic(mdTimeAngleInterStatistic);
    // NOTE(review): unlike the bond branch above there is no synchronize
    // before these sums; stream ordering makes the kernel results visible
    // to the sumBuffAdd launches, so this matches the original behavior.
    sum_angle_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential);
    sum_angle_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX);
    sum_angle_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY);
    sum_angle_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ);
    cudaDeviceSynchronize();
    if (timer != NULL) timer->toc(mdTimeAngleInterStatistic);
    checkCUDAError ("InteractionEngine::applyInteraction sum angle statistic (with statistic)");
  }
}
// void InteractionEngine::
// calculateWidomDeltaEnergy (const MDSystem & sys,
// const NeighborList & nlist,
// WidomTestParticleInsertion_NVT & wtest,
// MDTimer * timer )
// {
// if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
// // printf ("### %d\n", nlist.mode);
// if (nlist.mode == CellListBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_NVT
// <<<toGridDim(wtest.numTestParticle()),
// nlist.myBlockDim.x,
// nlist.myBlockDim.x * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.dclist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// else if (nlist.mode == AllPairBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_allPair_NVT
// <<<toGridDim(wtest.numTestParticle()),
// DefaultNThreadPerBlock,
// DefaultNThreadPerBlock * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.myrlist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
// }
// void InteractionEngine::
// calculateWidomDeltaEnergy (const MDSystem & sys,
// const NeighborList & nlist,
// WidomTestParticleInsertion_NVT2 & wtest,
// MDTimer * timer )
// {
// if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
// // printf ("### %d\n", nlist.mode);
// if (nlist.mode == CellListBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_NVT
// <<<toGridDim(wtest.numTestParticle()),
// nlist.myBlockDim.x,
// nlist.myBlockDim.x * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.dclist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
// }
// void InteractionEngine::
// calculateWidomDeltaEnergy (const MDSystem & sys,
// const NeighborList & nlist,
// WidomTestParticleInsertion_NPT & wtest,
// MDTimer * timer )
// {
// if (timer != NULL) timer->tic(mdTimeNBInterStatistic);
// // printf ("### %d\n", nlist.mode);
// if (nlist.mode == CellListBuilt){
// // printf ("### here %f, n: %d\n", wtest.energyCorrection(), wtest.numTestParticle());
// widomDeltaPoten_NVT
// <<<toGridDim(wtest.numTestParticle()),
// nlist.myBlockDim.x,
// nlist.myBlockDim.x * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.dclist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// else if (nlist.mode == AllPairBuilt){
// // printf ("### here %f\n", wtest.energyCorrection());
// widomDeltaPoten_allPair_NVT
// <<<toGridDim(wtest.numTestParticle()),
// DefaultNThreadPerBlock,
// DefaultNThreadPerBlock * sizeof(ScalorType)>>> (
// wtest.numTestParticle(),
// wtest.coordTestParticle,
// wtest.typeTestParticle,
// sys.ddata.numAtom,
// sys.ddata.coord,
// sys.ddata.type,
// sys.box,
// nlist.myrlist,
// wtest.sumExpDeltaU.buff,
// err.ptr_de);
// }
// // for (unsigned i = 0; i < wtest.numTestParticle(); ++i){
// // printf ("%d %f (%f %f %f)\n", i,
// // wtest.sumExpDeltaU.buff[i],
// // wtest.coordTestParticle[i].x,
// // wtest.coordTestParticle[i].y,
// // wtest.coordTestParticle[i].z
// // );
// // }
// if (timer != NULL) timer->toc(mdTimeNBInterStatistic);
// }
// Kernel: reset the three force component arrays to zero, one atom per
// thread.  The launch grid is 2-D and is flattened as
// (blockIdx.x + gridDim.x * blockIdx.y); threads whose flat index falls
// beyond numAtom do nothing.
__global__ void clearForce (const IndexType numAtom,
                            ScalorType * forcx,
                            ScalorType * forcy,
                            ScalorType * forcz)
{
  const IndexType flatBlock = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType atomIdx   = threadIdx.x + flatBlock * blockDim.x;
  if (atomIdx >= numAtom) return;
  forcx[atomIdx] = 0.0f;
  forcy[atomIdx] = 0.0f;
  forcz[atomIdx] = 0.0f;
}
// __global__ void
// calNonBondedInteraction (const CoordType * coord,
// const TypeType * type,
// DeviceCellListData clist,
// DeviceCellListProperty clistPro,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// bool sharednbForceTable)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// ScalorType fsumx(0.f), fsumy(0.f), fsumz(0.f);
// extern __shared__ volatile char pub_sbuff[];
// volatile IndexType * targetIndex = (volatile IndexType *) pub_sbuff;
// CoordType * targetCoord = (CoordType *) &targetIndex[roundUp4(blockDim.x)];
// volatile TypeType * targetType = (volatile TypeType *) &targetCoord[roundUp4(blockDim.x)];
// __syncthreads();
// IndexType ii = get (clist, bid, tid);
// CoordType ref;
// TypeType refType;
// if (ii != MaxIndexValue){
// ref = tex1Dfetch (global_texRef_interaction_coord, ii);
// refType = tex1Dfetch(global_texRef_interaction_type, ii);
// }
// for (unsigned i = 0; i < numNeighborCell(clistPro, bid); ++i){
// __syncthreads();
// IndexType targetCellIndex = getTargetCellIndex (clistPro, bid, i);
// CoordType shift = getShiftValue (clistPro, bid, i);
// IndexType targetIndex[tid] = get (clist, targetCellIndex, tid);
// if (targetIndex[tid] != MaxIndexValue){
// targetCoord[tid] = tex1Dfetch (global_texRef_interaction_coord, targetIndexes[tid]);
// targetType[tid] = tex1Dfetch (global_texRef_interaction_type, targetIndexes[tid]);
// }
// __syncthreads ();
// if (ii != MaxIndexValue){
// for (IndexType jj = 0; jj < blockDim.x; ++jj){
// if (targetIndex[jj] == MaxIndexValue) continue;
// ScalorType diffx = targetCoord[jj].x + shift.x - ref.x;
// ScalorType diffy = targetCoord[jj].y + shift.y - ref.y;
// ScalorType diffz = targetCoord[jj].z + shift.z - ref.z;
// if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 &&
// targetIndex[jj] != ii){
// ForceIndexType fidx;
// if (sharednbForceTable){
// fidx = nonBondedInteractionTableItem (
// nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]);
// }
// else {
// fidx = nonBondedInteractionTableItem (
// nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]);
// }
// ScalorType fx, fy, fz;
// nbforce (nonBondedInteractionType[fidx],
// &nonBondedInteractionParameter
// [nonBondedInteractionParameterPosition[fidx]],
// diffx, diffy, diffz,
// &fx, &fy, &fz);
// fsumx += fx;
// fsumy += fy;
// fsumz += fz;
// }
// }
// }
// }
// if (ii != MaxIndexValue){
// forcx[ii] += fsumx;
// forcy[ii] += fsumy;
// forcz[ii] += fsumz;
// }
// }
// Non-bonded force kernel: Verlet neighbor-list variant with a per-atom
// cut-off radius and no statistics output.  One thread per atom: it walks
// the atom's column of the neighbor list (column-major, stride
// nlist.stride), applies the minimum-image convention, and accumulates
// pair forces for neighbors closer than the atom's own cut-off.
// Coordinates are read through the texture reference
// global_texRef_interaction_coord bound by the host code; the `coord`
// and `type` parameters are unused here.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
                                  const CoordType * coord,
                                  ScalorType * forcx,
                                  ScalorType * forcy,
                                  ScalorType * forcz,
                                  const TypeType * type,
                                  const ScalorType * rcut,
                                  const RectangularBox box,
                                  const DeviceNeighborList nlist)
{
  const IndexType flatBlock = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType atomIdx   = threadIdx.x + flatBlock * blockDim.x;
  if (atomIdx >= numAtom) return;

  CoordType myCoord (tex1Dfetch(global_texRef_interaction_coord, atomIdx));
  const ScalorType myRcut  = rcut[atomIdx];
  const ScalorType myRcut2 = myRcut * myRcut;      // compare squared distances

  ScalorType accx = 0.0f, accy = 0.0f, accz = 0.0f;
  ScalorType fx(0.f), fy(0.f), fz(0.f);
  const IndexType myNumNeighbor = nlist.Nneighbor[atomIdx];
  IndexType listPosi = atomIdx;
  for (IndexType jj = 0; jj < myNumNeighbor; ++jj, listPosi += nlist.stride){
    const IndexType neighborIdx   = nlist.data [listPosi];
    const IndexType forceTableIdx = nlist.forceIndex [listPosi];
    CoordType neighborCoord (tex1Dfetch(global_texRef_interaction_coord, neighborIdx));
    ScalorType dx = neighborCoord.x - myCoord.x;
    ScalorType dy = neighborCoord.y - myCoord.y;
    ScalorType dz = neighborCoord.z - myCoord.z;
    shortestImage (box, &dx, &dy, &dz);
    if (dx*dx + dy*dy + dz*dz < myRcut2){
      nbForce (nonBondedInteractionType[forceTableIdx],
               &nonBondedInteractionParameter
               [nonBondedInteractionParameterPosition[forceTableIdx]],
               dx, dy, dz,
               &fx, &fy, &fz);
      accx += fx;
      accy += fy;
      accz += fz;
    }
  }
  forcx[atomIdx] += accx;
  forcy[atomIdx] += accy;
  forcz[atomIdx] += accz;
}
// Non-bonded force kernel: neighbor-list variant with an exclusion list
// and no statistics output.  One thread per atom walks the atom's column
// of the neighbor list and accumulates pair forces, skipping neighbors
// that appear in the atom's exclusion list.  When sharedExclusionList is
// true the exclusions are first staged into dynamic shared memory
// (excl_sbuff, one strided slot set per thread); otherwise they are
// re-read from global memory for every neighbor.  Coordinates come from
// the texture reference global_texRef_interaction_coord; the `coord` and
// `type` parameters are unused in this variant.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const TypeType * type,
const RectangularBox box,
const DeviceNeighborList nlist,
const DeviceExclusionList dexcllist,
const bool sharedExclusionList)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
ScalorType fsumx = 0.0f;
ScalorType fsumy = 0.0f;
ScalorType fsumz = 0.0f;
IndexType ii = tid + bid * blockDim.x;
IndexType * ptr_excllist;
IndexType myNumExclusion (0);
// dynamic shared buffer; layout is strided: thread tid owns slots
// jj*blockDim.x + tid for jj in [0, myNumExclusion)
extern __shared__ char excl_sbuff[];
if (dexcllist.maxNumExclusion != 0 && ii < numAtom){
myNumExclusion = dexcllist.numExclusion[ii];
if (sharedExclusionList){
// stage this thread's exclusion entries into shared memory
ptr_excllist = (IndexType *) excl_sbuff;
for (IndexType jj = 0; jj < myNumExclusion; ++jj){
ptr_excllist[jj*blockDim.x+tid] =
dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii];
}
}
}
if (ii < numAtom) {
CoordType ref = tex1Dfetch(global_texRef_interaction_coord, ii);
ScalorType fx(0.f), fy(0.f), fz(0.f);
// walk this atom's column of the neighbor list
for (IndexType jj = 0, nlistPosi = ii;
jj < nlist.Nneighbor[ii];
++jj, nlistPosi += nlist.stride){
IndexType targetIdx ( nlist.data [nlistPosi] );
IndexType nbForceIndex;
CoordType target;
ScalorType diffx, diffy, diffz;
// skip excluded pairs: jump past the force evaluation below
if (sharedExclusionList){
for (IndexType kk = 0; kk < myNumExclusion; ++kk){
if (ptr_excllist[kk*blockDim.x+tid] == targetIdx) {
goto skipInter;
}
}
}
else {
for (IndexType kk = 0; kk < myNumExclusion; ++kk){
if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) {
goto skipInter;
}
}
}
nbForceIndex = ( nlist.forceIndex [nlistPosi] );
target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) );
diffx = ( target.x - ref.x );
diffy = ( target.y - ref.y );
diffz = ( target.z - ref.z );
// minimum-image convention in the rectangular box
shortestImage (box, &diffx, &diffy, &diffz);
nbForce (nonBondedInteractionType[nbForceIndex],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[nbForceIndex]],
diffx, diffy, diffz,
&fx, &fy, &fz);
fsumx += fx;
fsumy += fy;
fsumz += fz;
// target of the exclusion skip above; empty statement block
skipInter:
{
}
}
forcx[ii] += fsumx;
forcy[ii] += fsumy;
forcz[ii] += fsumz;
}
}
// Non-bonded force kernel: neighbor-list variant with per-atom cut-off
// radii that also records per-atom statistics.  In addition to
// accumulating pair forces into forc{x,y,z}, each thread writes its
// half-weighted potential energy and diagonal virial contributions into
// statistic_nb_buff0..3 (potential, Vxx, Vyy, Vzz); the 0.5f factor
// compensates for each pair being visited from both partners' neighbor
// lists — TODO confirm the list is stored symmetrically.  Coordinates
// come from global_texRef_interaction_coord; `coord` and `type` are
// unused in this variant.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const TypeType * type,
const ScalorType * rcut,
const RectangularBox box,
const DeviceNeighborList nlist,
ScalorType * statistic_nb_buff0,
ScalorType * statistic_nb_buff1,
ScalorType * statistic_nb_buff2,
ScalorType * statistic_nb_buff3)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
ScalorType fsumx = 0.0f;
ScalorType fsumy = 0.0f;
ScalorType fsumz = 0.0f;
IndexType ii = tid + bid * blockDim.x;
ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
if (ii < numAtom) {
CoordType ref;
ref = tex1Dfetch(global_texRef_interaction_coord, ii);
// per-atom cut-off, squared so distances need no sqrt
ScalorType refrcut2 = rcut[ii];
refrcut2 = refrcut2 * refrcut2;
ScalorType fx(0.f), fy(0.f), fz(0.f);
ScalorType dp;
// walk this atom's column of the neighbor list
for (IndexType jj = 0, nlistPosi = ii;
jj < nlist.Nneighbor[ii];
++jj, nlistPosi += nlist.stride){
IndexType targetIdx ( nlist.data[nlistPosi] );
IndexType nbForceIndex ( nlist.forceIndex [nlistPosi] );
CoordType target ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) );
ScalorType diffx ( target.x - ref.x );
ScalorType diffy ( target.y - ref.y );
ScalorType diffz ( target.z - ref.z );
// minimum-image convention in the rectangular box
shortestImage (box, &diffx, &diffy, &diffz);
if (diffx*diffx + diffy*diffy + diffz*diffz < refrcut2){
// force plus pair potential in one evaluation
nbForcePoten (nonBondedInteractionType[nbForceIndex],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[nbForceIndex]],
diffx, diffy, diffz,
&fx, &fy, &fz, &dp);
// printf ("## %d\t%d\t%f\t%f\t%f\n",
// ii, targetIdx,
// ref.z, target.z, fz);
// printf ("%f, %f %f %f, %f %f %f, %f %f %f, %f\n",
// sqrtf(diffx*diffx+diffy*diffy+diffz*diffz),
// ref.x, ref.y, ref.z,
// target.x, target.y, target.z,
// diffx, diffy, diffz,
// dp
// );
myPoten += dp;
myVxx += fx * diffx;
myVyy += fy * diffy;
myVzz += fz * diffz;
fsumx += fx;
fsumy += fy;
fsumz += fz;
}
}
forcx[ii] += fsumx;
forcy[ii] += fsumy;
forcz[ii] += fsumz;
}
// per-atom statistics, half-weighted (pairs counted twice overall)
if (ii < numAtom){
statistic_nb_buff0[ii] = myPoten * 0.5f;
statistic_nb_buff1[ii] = myVxx * 0.5f;
statistic_nb_buff2[ii] = myVyy * 0.5f;
statistic_nb_buff3[ii] = myVzz * 0.5f;
}
}
// Non-bonded force kernel: neighbor-list variant with an exclusion list
// that also records per-atom statistics.  Combines the exclusion-list
// logic (skip neighbors listed in dexcllist, optionally staged into
// dynamic shared memory) with per-atom accumulation of the half-weighted
// potential energy and diagonal virial into statistic_nb_buff0..3
// (potential, Vxx, Vyy, Vzz).  Coordinates come from the texture
// reference global_texRef_interaction_coord; `coord` and `type` are
// unused in this variant.
__global__ void
calNonBondedInteraction_neighbor (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const TypeType * type,
const RectangularBox box,
const DeviceNeighborList nlist,
const DeviceExclusionList dexcllist,
const bool sharedExclusionList,
ScalorType * statistic_nb_buff0,
ScalorType * statistic_nb_buff1,
ScalorType * statistic_nb_buff2,
ScalorType * statistic_nb_buff3)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
ScalorType fsumx = 0.0f;
ScalorType fsumy = 0.0f;
ScalorType fsumz = 0.0f;
IndexType ii = tid + bid * blockDim.x;
ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
IndexType * ptr_excllist;
IndexType myNumExclusion (0);
// dynamic shared buffer; layout is strided: thread tid owns slots
// jj*blockDim.x + tid for jj in [0, myNumExclusion)
extern __shared__ char excl_sbuff[];
if (dexcllist.maxNumExclusion != 0 && ii < numAtom){
myNumExclusion = dexcllist.numExclusion[ii];
if (sharedExclusionList){
// stage this thread's exclusion entries into shared memory
ptr_excllist = (IndexType *) excl_sbuff;
for (IndexType jj = 0; jj < myNumExclusion; ++jj){
ptr_excllist[jj*blockDim.x+tid] =
dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii];
}
}
}
if (ii < numAtom) {
CoordType ref;
ref = tex1Dfetch(global_texRef_interaction_coord, ii);
ScalorType fx(0.f), fy(0.f), fz(0.f);
ScalorType dp;
// walk this atom's column of the neighbor list
for (IndexType jj = 0, nlistPosi = ii;
jj < nlist.Nneighbor[ii];
++jj, nlistPosi += nlist.stride){
IndexType targetIdx ( nlist.data[nlistPosi] );
IndexType nbForceIndex;
CoordType target;
ScalorType diffx, diffy, diffz;
// skip excluded pairs: jump past the force evaluation below
if (sharedExclusionList){
for (IndexType kk = 0; kk < myNumExclusion; ++kk){
if (ptr_excllist[kk*blockDim.x+tid] == targetIdx) {
goto skipInter;
}
}
}
else {
for (IndexType kk = 0; kk < myNumExclusion; ++kk){
if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) {
goto skipInter;
}
}
}
nbForceIndex = ( nlist.forceIndex [nlistPosi] );
target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) );
diffx = ( target.x - ref.x );
diffy = ( target.y - ref.y );
diffz = ( target.z - ref.z );
// minimum-image convention in the rectangular box
shortestImage (box, &diffx, &diffy, &diffz);
// force plus pair potential in one evaluation
nbForcePoten (nonBondedInteractionType[nbForceIndex],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[nbForceIndex]],
diffx, diffy, diffz,
&fx, &fy, &fz, &dp);
// printf ("## %d\t%d\t%f\t%f\t%f\n",
// ii, targetIdx,
// ref.z, target.z, fz);
// printf ("%f, %f %f %f, %f %f %f, %f %f %f, %f\n",
// sqrtf(diffx*diffx+diffy*diffy+diffz*diffz),
// ref.x, ref.y, ref.z,
// target.x, target.y, target.z,
// diffx, diffy, diffz,
// dp
// );
myPoten += dp;
myVxx += fx * diffx;
myVyy += fy * diffy;
myVzz += fz * diffz;
fsumx += fx;
fsumy += fy;
fsumz += fz;
// target of the exclusion skip above; empty statement block
skipInter:
{
}
}
forcx[ii] += fsumx;
forcy[ii] += fsumy;
forcz[ii] += fsumz;
}
// per-atom statistics, half-weighted (pairs counted twice overall)
if (ii < numAtom){
statistic_nb_buff0[ii] = myPoten * 0.5f;
statistic_nb_buff1[ii] = myVxx * 0.5f;
statistic_nb_buff2[ii] = myVyy * 0.5f;
statistic_nb_buff3[ii] = myVzz * 0.5f;
}
}
// Bond (2-body) force kernel: one thread per atom.  For each bond stored
// in atom ii's column of the device bond list (column-major, stride
// bdlist.stride), the pair vector is wrapped by the minimum-image
// convention and the bond force is accumulated into forc{x,y,z}[ii].
// Coordinates come from global memory or from the texture reference
// global_texRef_interaction_coord depending on COMPILE_NO_TEX.
__global__ void calBondInteraction (const IndexType numAtom,
                                    const CoordType * coord,
                                    ScalorType * forcx,
                                    ScalorType * forcy,
                                    ScalorType * forcz,
                                    const RectangularBox box,
                                    const DeviceBondList bdlist)
{
  const IndexType flatBlock = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType ii = threadIdx.x + flatBlock * blockDim.x;
  if (ii >= numAtom) return;

  CoordType myCoord;
#ifdef COMPILE_NO_TEX
  myCoord = coord[ii];
#else
  myCoord = tex1Dfetch(global_texRef_interaction_coord, ii);
#endif

  ScalorType accx = 0.0f, accy = 0.0f, accz = 0.0f;
  const IndexType myNumBond = bdlist.numBond[ii];
  // never walk past the list capacity, matching the original break at
  // jj == myNumBond inside a jj < maxNumBond loop
  const IndexType nBond = (myNumBond < bdlist.maxNumBond) ?
      myNumBond : bdlist.maxNumBond;
  for (IndexType jj = 0; jj < nBond; ++jj){
    const IndexType partnerIdx = bdlist.bondNeighborIndex[jj * bdlist.stride + ii];
    CoordType partner;
#ifdef COMPILE_NO_TEX
    partner = coord[partnerIdx];
#else
    partner = tex1Dfetch(global_texRef_interaction_coord, partnerIdx);
#endif
    ScalorType dx = partner.x - myCoord.x;
    ScalorType dy = partner.y - myCoord.y;
    ScalorType dz = partner.z - myCoord.z;
    shortestImage (box, &dx, &dy, &dz);
    ScalorType fx, fy, fz;
    const IndexType bondFindex = bdlist.bondIndex[jj * bdlist.stride + ii];
    bondForce (bondedInteractionType[bondFindex],
               &bondedInteractionParameter
               [bondedInteractionParameterPosition[bondFindex]],
               dx, dy, dz, &fx, &fy, &fz);
    accx += fx;
    accy += fy;
    accz += fz;
  }
  forcx[ii] += accx;
  forcy[ii] += accy;
  forcz[ii] += accz;
}
// Bond (2-body) force kernel with statistics: one thread per atom,
// accumulating bond forces into forc{x,y,z} and reducing the
// half-weighted potential energy and diagonal virial per block into
// statistic_b_buff0..3 (potential, Vxx, Vyy, Vzz; one value per block)
// via the dynamic shared buffer `buff` and sumVectorBlockBuffer_2.
// The 0.5f factor compensates for each bond being visited from both of
// its atoms — TODO confirm the bond list stores each bond twice.
// Out-of-range threads contribute zero to the reductions (their my*
// accumulators stay 0).  `ptr_de` is accepted but not used here.
__global__ void calBondInteraction (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const RectangularBox box,
const DeviceBondList bdlist,
ScalorType * statistic_b_buff0,
ScalorType * statistic_b_buff1,
ScalorType * statistic_b_buff2,
ScalorType * statistic_b_buff3,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// dynamic shared reduction buffer, one slot per thread
extern __shared__ volatile ScalorType buff[];
buff[tid] = 0.f;
__syncthreads();
ScalorType fsumx = 0.0f;
ScalorType fsumy = 0.0f;
ScalorType fsumz = 0.0f;
IndexType ii = tid + bid * blockDim.x;
ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
if (ii < numAtom) {
CoordType ref;
#ifdef COMPILE_NO_TEX
ref = coord[ii];
#else
ref = tex1Dfetch(global_texRef_interaction_coord, ii);
#endif
IndexType myNumBond = bdlist.numBond[ii];
// walk this atom's column of the bond list, up to list capacity
for (IndexType jj = 0; jj < bdlist.maxNumBond; ++jj){
if (jj == myNumBond) break;
IndexType targetIdx = bdlist.bondNeighborIndex[jj * bdlist.stride + ii];
CoordType target;
#ifdef COMPILE_NO_TEX
target = coord[targetIdx];
#else
target = tex1Dfetch(global_texRef_interaction_coord, targetIdx);
#endif
ScalorType diffx, diffy, diffz;
diffx = target.x - ref.x;
diffy = target.y - ref.y;
diffz = target.z - ref.z;
// minimum-image convention in the rectangular box
shortestImage (box, &diffx, &diffy, &diffz);
ScalorType fx, fy, fz;
IndexType bondFindex = bdlist.bondIndex[jj * bdlist.stride + ii];
ScalorType dp;
// force plus bond potential in one evaluation
bondForcePoten (bondedInteractionType[bondFindex],
&bondedInteractionParameter
[bondedInteractionParameterPosition[bondFindex]],
diffx, diffy, diffz, &fx, &fy, &fz, &dp);
myPoten += dp;
myVxx += fx * diffx;
myVyy += fy * diffy;
myVzz += fz * diffz;
fsumx += fx;
fsumy += fy;
fsumz += fz;
}
forcx[ii] += fsumx;
forcy[ii] += fsumy;
forcz[ii] += fsumz;
}
// block-wide reductions; all threads participate, thread 0 writes the
// per-block result
buff[tid] = myPoten * 0.5f;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0];
__syncthreads();
buff[tid] = myVxx * 0.5f;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0];
__syncthreads();
buff[tid] = myVyy * 0.5f;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0];
__syncthreads();
buff[tid] = myVzz * 0.5f;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0];
__syncthreads();
}
// Angle (3-body) force kernel: one thread per atom.  Each angle entry
// stores two partner indices and this atom's position in the triple
// (anglePosi == 1 marks the central/vertex atom).  The two arm vectors
// are wrapped by the minimum-image convention, angleForce returns the
// forces on the two arms, and this atom's share is accumulated into
// forc{x,y,z}[ii]: the vertex receives f0 + f1, an end atom receives
// -f0.  Coordinates come from global memory or the texture reference
// global_texRef_interaction_coord depending on COMPILE_NO_TEX.
__global__ void calAngleInteraction (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const RectangularBox box,
const DeviceAngleList anglelist)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
ScalorType fsumx = 0.0f;
ScalorType fsumy = 0.0f;
ScalorType fsumz = 0.0f;
IndexType ii = tid + bid * blockDim.x;
IndexType myNumAngle;
if (ii < numAtom){
myNumAngle = anglelist.numAngle[ii];
}
else {
// NOTE(review): this assignment is dead — the thread returns on the
// next line (the commented __all vote below once used it)
myNumAngle = 0;
return ;
}
// if (__all(myNumAngle == 0)) return ;
if (ii < numAtom) {
CoordType ref;
#ifdef COMPILE_NO_TEX
ref = coord[ii];
#else
ref = tex1Dfetch(global_texRef_interaction_coord, ii);
#endif
for (IndexType jj = 0; jj < myNumAngle; ++jj){
// two partner atoms of the jj-th angle, stored interleaved
IndexType targetIdx0 =
anglelist.angleNeighborIndex[((jj<<1) ) * anglelist.stride + ii];
IndexType targetIdx1 =
anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii];
IndexType myPosi =
anglelist.anglePosi[jj * anglelist.stride + ii];
CoordType target0, target1;
#ifdef COMPILE_NO_TEX
target0 = coord[targetIdx0];
target1 = coord[targetIdx1];
#else
target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0);
target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1);
#endif
ScalorType diff0x, diff0y, diff0z;
ScalorType diff1x, diff1y, diff1z;
// arm vectors depend on whether this atom is the vertex of the angle
bool center (myPosi == 1);
if (center){
diff0x = ref.x - target0.x;
diff0y = ref.y - target0.y;
diff0z = ref.z - target0.z;
diff1x = target1.x - ref.x;
diff1y = target1.y - ref.y;
diff1z = target1.z - ref.z;
} else {
diff0x = target0.x - ref.x;
diff0y = target0.y - ref.y;
diff0z = target0.z - ref.z;
diff1x = target1.x - target0.x;
diff1y = target1.y - target0.y;
diff1z = target1.z - target0.z;
}
// minimum-image convention on both arms
shortestImage (box, &diff0x, &diff0y, &diff0z);
shortestImage (box, &diff1x, &diff1y, &diff1z);
ScalorType f0x, f0y, f0z;
ScalorType f1x, f1y, f1z;
IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii];
angleForce (center,
bondedInteractionType[angleFindex],
&bondedInteractionParameter
[bondedInteractionParameterPosition[angleFindex]],
diff0x, diff0y, diff0z,
diff1x, diff1y, diff1z,
&f0x, &f0y, &f0z,
&f1x, &f1y, &f1z);
// this atom's share of the three-body force
if (center){
fsumx += f0x + f1x;
fsumy += f0y + f1y;
fsumz += f0z + f1z;
}
else {
fsumx -= f0x;
fsumy -= f0y;
fsumz -= f0z;
}
}
forcx[ii] += fsumx;
forcy[ii] += fsumy;
forcz[ii] += fsumz;
}
}
// Angle (3-body) force kernel with statistics: one thread per atom,
// accumulating angle forces into forc{x,y,z} and reducing the potential
// energy and diagonal virial per block into statistic_b_buff0..3
// (potential, Vxx, Vyy, Vzz; one value per block) via the dynamic shared
// buffer `buff` and sumVectorBlockBuffer_2.  The potential is weighted
// by 1/3 since each angle is visited from all three of its atoms; the
// virial is accumulated only on the central-atom branch.  Out-of-range
// threads contribute zero to the reductions.  `ptr_de` is accepted but
// not used here.
__global__ void calAngleInteraction (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const RectangularBox box,
const DeviceAngleList anglelist,
ScalorType * statistic_b_buff0,
ScalorType * statistic_b_buff1,
ScalorType * statistic_b_buff2,
ScalorType * statistic_b_buff3,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
ScalorType fsumx = 0.0f;
ScalorType fsumy = 0.0f;
ScalorType fsumz = 0.0f;
ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f;
IndexType ii = tid + bid * blockDim.x;
IndexType myNumAngle;
// dynamic shared reduction buffer, one slot per thread
extern __shared__ volatile ScalorType buff[];
buff[tid] = 0.f;
__syncthreads();
if (ii < numAtom) {
CoordType ref;
#ifdef COMPILE_NO_TEX
ref = coord[ii];
#else
ref = tex1Dfetch(global_texRef_interaction_coord, ii);
#endif
myNumAngle = anglelist.numAngle[ii];
for (IndexType jj = 0; jj < myNumAngle; ++jj){
// two partner atoms of the jj-th angle, stored interleaved
IndexType targetIdx0 =
anglelist.angleNeighborIndex[((jj<<1) ) * anglelist.stride + ii];
IndexType targetIdx1 =
anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii];
IndexType myPosi =
anglelist.anglePosi[jj * anglelist.stride + ii];
CoordType target0, target1;
#ifdef COMPILE_NO_TEX
target0 = coord[targetIdx0];
target1 = coord[targetIdx1];
#else
target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0);
target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1);
#endif
ScalorType diff0x, diff0y, diff0z;
ScalorType diff1x, diff1y, diff1z;
// arm vectors depend on whether this atom is the vertex of the angle
bool center = (myPosi == 1);
if (center){
diff0x = ref.x - target0.x;
diff0y = ref.y - target0.y;
diff0z = ref.z - target0.z;
diff1x = target1.x - ref.x;
diff1y = target1.y - ref.y;
diff1z = target1.z - ref.z;
} else {
diff0x = target0.x - ref.x;
diff0y = target0.y - ref.y;
diff0z = target0.z - ref.z;
diff1x = target1.x - target0.x;
diff1y = target1.y - target0.y;
diff1z = target1.z - target0.z;
}
// minimum-image convention on both arms
shortestImage (box, &diff0x, &diff0y, &diff0z);
shortestImage (box, &diff1x, &diff1y, &diff1z);
ScalorType f0x, f0y, f0z;
ScalorType f1x, f1y, f1z;
IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii];
ScalorType dp;
angleForcePoten (center,
bondedInteractionType[angleFindex],
&bondedInteractionParameter
[bondedInteractionParameterPosition[angleFindex]],
diff0x, diff0y, diff0z,
diff1x, diff1y, diff1z,
&f0x, &f0y, &f0z,
&f1x, &f1y, &f1z,
&dp);
myPoten += dp;
if (center){
fsumx += f0x + f1x;
fsumy += f0y + f1y;
fsumz += f0z + f1z;
// virial from the two arm vectors, counted once per angle (on the
// vertex atom only)
myVxx -= f0x * diff0x - f1x * diff1x;
myVyy -= f0y * diff0y - f1y * diff1y;
myVzz -= f0z * diff0z - f1z * diff1z;
}
else {
fsumx -= f0x;
fsumy -= f0y;
fsumz -= f0z;
}
}
forcx[ii] += fsumx;
forcy[ii] += fsumy;
forcz[ii] += fsumz;
}
// block-wide reductions; all threads participate, thread 0 writes the
// per-block result
buff[tid] = myPoten * 0.33333333333333333f;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0];
__syncthreads();
buff[tid] = myVxx;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0];
__syncthreads();
buff[tid] = myVyy;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0];
__syncthreads();
buff[tid] = myVzz;
sumVectorBlockBuffer_2 (buff);
if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0];
__syncthreads();
}
// static __device__ IndexType shiftedD3toD1 (
// DeviceCellList clist,
// RectangularBoxGeometry::RectangularBox box,
// int ix, int iy, int iz,
// ScalorType * shiftx , ScalorType * shifty, ScalorType * shiftz)
// {
// int tmp;
// ix += (tmp = -int(floorf(ix * clist.NCelli.x))) * clist.NCell.x;
// *shiftx = tmp * box.size.x;
// iy += (tmp = -int(floorf(iy * clist.NCelli.y))) * clist.NCell.y;
// *shifty = tmp * box.size.y;
// iz += (tmp = -int(floorf(iz * clist.NCelli.z))) * clist.NCell.z;
// *shiftz = tmp * box.size.z;
// return D3toD1 (clist.NCell, ix, iy, iz);
// }
// __global__ void calNonBondedInteraction (
// const IndexType numAtom,
// const CoordType * coord,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// const TypeType * type,
// const RectangularBox box,
// DeviceCellList clist,
// mdError_t * ptr_de)
// {
// // RectangularBoxGeometry::normalizeSystem (box, &ddata);
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType bidx, bidy, bidz;
// D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
// // load index
// IndexType ii = getDeviceCellListData (clist, bid, tid);
// // load iith coordinate // use texturefetch instead
// CoordType ref;
// TypeType reftype;
// ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
// if (ii != MaxIndexValue){
// #ifdef COMPILE_NO_TEX
// ref = coord[ii];
// reftype = type[ii];
// #else
// ref = tex1Dfetch (global_texRef_interaction_coord, ii);
// // reftype = tex1Dfetch(global_texRef_interaction_type, ii);
// #endif
// }
// ScalorType rlist = clist.rlist;
// // the target index and coordinates are shared
// extern __shared__ volatile char pub_sbuff[];
// volatile IndexType * targetIndexes =
// (volatile IndexType *) pub_sbuff;
// CoordType * target =
// (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
// volatile TypeType * targettype =
// (volatile TypeType *) &target[roundUp4(blockDim.x)];
// __syncthreads();
// // bool oneCellX(false), oneCellY(false), oneCellZ(false);
// // if (clist.NCell.x == 1) oneCellX = true;
// // if (clist.NCell.y == 1) oneCellY = true;
// // if (clist.NCell.z == 1) oneCellZ = true;
// // int upperx(1), lowerx(-1);
// // int uppery(1), lowery(-1);
// // int upperz(1), lowerz(-1);
// // if (oneCellX) {lowerx = 0; upperx = 0;}
// // if (oneCellY) {lowery = 0; uppery = 0;}
// // if (oneCellZ) {lowerz = 0; upperz = 0;}
// ScalorType rlist2 = rlist * rlist;
// // loop over 27 neighbor cells
// #pragma unroll 3
// // for (int nci = bidx + lowerx; nci <= bidx + upperx; ++nci){
// // for (int ncj = bidy + lowery; ncj <= bidy + uppery; ++ncj){
// // for (int nck = bidz + lowerz; nck <= bidz + upperz; ++nck){
// for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){
// for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){
// for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){
// // for (int di = lowerx; di <= upperx; ++di){
// // for (int dj = lowery; dj <= uppery; ++dj){
// // for (int dk = lowerz; dk <= upperz; ++dk){
// __syncthreads();
// // the shift value of a cell is pre-computed
// ScalorType xshift, yshift, zshift;
// // int nci = di + bidx;
// // int ncj = dj + bidy;
// // int nck = dk + bidz;
// IndexType targetCellIdx = shiftedD3toD1 (clist, box,
// nci, ncj, nck,
// &xshift, &yshift, &zshift);
// // load target index and coordinates form global memary
// // IndexType tmp = (targetIndexes[tid] =
// // getDeviceCellListData(clist, targetCellIdx, tid));
// targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
// if (targetIndexes[tid] != MaxIndexValue){
// // #ifdef COMPILE_NO_TEX
// // target[tid] = coord[tmp];
// // // targettype[tid] = type[tmp];
// // #else
// target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
// // targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp);
// // #endif
// }
// __syncthreads();
// // find neighbor
// if (ii != MaxIndexValue){
// for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
// // if (targetIndexes[jj] == MaxIndexValue) break;
// ScalorType diffx = target[jj].x - xshift - ref.x;
// ScalorType diffy = target[jj].y - yshift - ref.y;
// ScalorType diffz = target[jj].z - zshift - ref.z;
// // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
// // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
// // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
// //printf ("%d\t%d\t%f\t%f\n", ii,
// ScalorType dr2;
// if ((dr2 = (diffx*diffx+diffy*diffy+diffz*diffz)) < rlist2 &&
// targetIndexes[jj] != ii){
// IndexType fidx(0);
// // fidx = AtomNBForceTable::calForceIndex (
// // nonBondedInteractionTable,
// // const_numAtomType[0],
// // reftype,
// // targettype[jj]);
// // if (fidx != mdForceNULL) {
// ScalorType fx, fy, fz;
// nbForce (nonBondedInteractionType[fidx],
// &nonBondedInteractionParameter
// [nonBondedInteractionParameterPosition[fidx]],
// diffx, diffy, diffz,
// dr2,
// &fx, &fy, &fz);
// fsumx += fx;
// fsumy += fy;
// fsumz += fz;
// // }
// }
// }
// }
// }
// }
// }
// if (ii != MaxIndexValue){
// forcx[ii] += fsumx;
// forcy[ii] += fsumy;
// forcz[ii] += fsumz;
// }
// }
// __global__ void calNonBondedInteraction (
// const IndexType numAtom,
// const CoordType * coord,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// const TypeType * type,
// const RectangularBox box,
// DeviceCellList clist,
// ScalorType * statistic_nb_buff0,
// ScalorType * statistic_nb_buff1,
// ScalorType * statistic_nb_buff2,
// ScalorType * statistic_nb_buff3,
// mdError_t * ptr_de)
// {
// // RectangularBoxGeometry::normalizeSystem (box, &ddata);
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType bidx, bidy, bidz;
// D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
// // load index
// IndexType ii = getDeviceCellListData (clist, bid, tid);
// // load iith coordinate // use texturefetch instead
// CoordType ref;
// TypeType reftype;
// ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
// ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
// if (ii != MaxIndexValue){
// #ifdef COMPILE_NO_TEX
// ref = coord[ii];
// reftype = type[ii];
// #else
// ref = tex1Dfetch (global_texRef_interaction_coord, ii);
// reftype = tex1Dfetch(global_texRef_interaction_type, ii);
// #endif
// }
// ScalorType rlist = clist.rlist;
// // the target index and coordinates are shared
// extern __shared__ volatile char pub_sbuff[];
// volatile IndexType * targetIndexes =
// (volatile IndexType *) pub_sbuff;
// CoordType * target =
// (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
// volatile TypeType * targettype =
// (volatile TypeType *) &target[roundUp4(blockDim.x)];
// __syncthreads();
// // bool oneCellX(false), oneCellY(false), oneCellZ(false);
// // if (clist.NCell.x == 1) oneCellX = true;
// // if (clist.NCell.y == 1) oneCellY = true;
// // if (clist.NCell.z == 1) oneCellZ = true;
// // int upperx(1), lowerx(-1);
// // int uppery(1), lowery(-1);
// // int upperz(1), lowerz(-1);
// // if (oneCellX) {lowerx = 0; upperx = 0;}
// // if (oneCellY) {lowery = 0; uppery = 0;}
// // if (oneCellZ) {lowerz = 0; upperz = 0;}
// ScalorType rlist2 = rlist * rlist;
// // loop over 27 neighbor cells
// #pragma unroll 3
// for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){
// for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){
// for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){
// // for (int di = lowerx; di <= upperx; ++di){
// // for (int dj = lowery; dj <= uppery; ++dj){
// // for (int dk = lowerz; dk <= upperz; ++dk){
// __syncthreads();
// // the shift value of a cell is pre-computed
// ScalorType xshift, yshift, zshift;
// // int nci = di + bidx;
// // int ncj = dj + bidy;
// // int nck = dk + bidz;
// IndexType targetCellIdx = shiftedD3toD1 (clist, box,
// nci, ncj, nck,
// &xshift, &yshift, &zshift);
// // load target index and coordinates form global memary
// IndexType tmp = (targetIndexes[tid] =
// getDeviceCellListData(clist, targetCellIdx, tid));
// if (tmp != MaxIndexValue){
// #ifdef COMPILE_NO_TEX
// target[tid] = coord[tmp];
// targettype[tid] = type[tmp];
// #else
// target[tid] = tex1Dfetch(global_texRef_interaction_coord, tmp);
// targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp);
// #endif
// }
// __syncthreads();
// // find neighbor
// if (ii != MaxIndexValue){
// for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
// ScalorType diffx = target[jj].x - xshift - ref.x;
// ScalorType diffy = target[jj].y - yshift - ref.y;
// ScalorType diffz = target[jj].z - zshift - ref.z;
// // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
// // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
// // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
// //printf ("%d\t%d\t%f\t%f\n", ii,
// if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 &&
// targetIndexes[jj] != ii){
// IndexType fidx(0);
// // fidx = AtomNBForceTable::calForceIndex (
// // nonBondedInteractionTable,
// // const_numAtomType[0],
// // reftype,
// // targettype[jj]);
// // if (fidx != mdForceNULL) {
// ScalorType fx, fy, fz, dp;
// nbForcePoten (nonBondedInteractionType[fidx],
// &nonBondedInteractionParameter
// [nonBondedInteractionParameterPosition[fidx]],
// diffx, diffy, diffz,
// &fx, &fy, &fz, &dp);
// myPoten += dp;
// myVxx += fx * diffx;
// myVyy += fy * diffy;
// myVzz += fz * diffz;
// fsumx += fx;
// fsumy += fy;
// fsumz += fz;
// // }
// }
// }
// }
// }
// }
// }
// if (ii != MaxIndexValue){
// forcx[ii] += fsumx;
// forcy[ii] += fsumy;
// forcz[ii] += fsumz;
// statistic_nb_buff0[ii] = myPoten * 0.5f;
// statistic_nb_buff1[ii] = myVxx * 0.5f;
// statistic_nb_buff2[ii] = myVyy * 0.5f;
// statistic_nb_buff3[ii] = myVzz * 0.5f;
// }
// }
// Non-bonded pairwise force evaluation driven by the cell list.
// Launch layout: one thread block per cell (block id decoded from a 2-D
// grid), one thread per cell slot.  Forces for pairs within rcut are
// ACCUMULATED (+=) into forcx/forcy/forcz.
//
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType))
// used to stage the neighbor cell's atom indexes, coordinates and types.
__global__ void
calNonBondedInteraction_cell (const IndexType numAtom,
                              const CoordType * coord,
                              ScalorType * forcx,
                              ScalorType * forcy,
                              ScalorType * forcz,
                              const TypeType * type,
                              const RectangularBox box,
                              const DeviceCellList clist,
                              const ScalorType rcut,
                              mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // my atom index; MaxIndexValue marks an unoccupied slot of this cell
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared staging area: neighbor-cell atom indexes, coordinates and types
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rcut2 = rcut * rcut;
  // hoisted out of the neighbor-cell loop: these flags depend only on the
  // fixed cell-grid dimensions, so recomputing them per neighbor cell was
  // wasted work
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    // all threads of the block reach this barrier: the loop bound depends
    // only on bid, which is uniform within the block
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    // periodic-image shift of the neighbor cell, precomputed per cell pair
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the occupied slots of the neighbor cell
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        // with a single cell along an axis the neighbor image is this cell
        // itself, so minimum-image wrapping must be applied explicitly
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 &&
            targetIndexes[jj] != ii){
          IndexType fidx(0);
          fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[jj]);
          ScalorType fx, fy, fz;
          nbForce (nonBondedInteractionType[fidx],
                   &nonBondedInteractionParameter
                   [nonBondedInteractionParameterPosition[fidx]],
                   diffx, diffy, diffz,
                   &fx, &fy, &fz);
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
}
// Non-bonded force evaluation via cell list, with per-atom statistics:
// besides accumulating forces (+=), it OVERWRITES (=) the per-atom
// potential (buff0) and virial diagonal (buff1..buff3), each scaled by
// 0.5f since every pair is visited from both partners.
//
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType))
__global__ void
calNonBondedInteraction_cell (const IndexType numAtom,
                              const CoordType * coord,
                              ScalorType * forcx,
                              ScalorType * forcy,
                              ScalorType * forcz,
                              const TypeType * type,
                              const RectangularBox box,
                              const DeviceCellList clist,
                              const ScalorType rcut,
                              ScalorType * statistic_nb_buff0,
                              ScalorType * statistic_nb_buff1,
                              ScalorType * statistic_nb_buff2,
                              ScalorType * statistic_nb_buff3,
                              mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // my atom index; MaxIndexValue marks an unoccupied slot of this cell
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared staging area: neighbor-cell atom indexes, coordinates and types
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rcut2 = rcut * rcut;
  // hoisted out of the neighbor-cell loop: these flags depend only on the
  // fixed cell-grid dimensions, so recomputing them per neighbor cell was
  // wasted work
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    // uniform loop bound (depends only on bid): every thread reaches this
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the occupied slots of the neighbor cell
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        // with a single cell along an axis the neighbor image is this cell
        // itself, so minimum-image wrapping must be applied explicitly
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 &&
            targetIndexes[jj] != ii){
          IndexType fidx(0);
          fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[jj]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    // statistics are overwritten; 0.5f compensates double counting of pairs
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// Cell-list sweep that simultaneously (a) evaluates non-bonded forces for
// pairs within rcut (accumulated +=) and (b) (re)builds the neighbor list
// for pairs within nlist.rlist (entries overwritten).
//
// Fix: the original wrote nlist.data / nlist.forceIndex BEFORE checking the
// capacity nlist.listLength, so an overfull neighbor list corrupted memory
// past the end of the list before the error was reported.  Writes are now
// skipped once the list is full, while Nneighbor keeps counting so the
// overflow is still reported through ptr_de exactly as before.
//
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType))
__global__ void
calNonBondedInteraction (const IndexType numAtom,
                         const CoordType * coord,
                         ScalorType * forcx,
                         ScalorType * forcy,
                         ScalorType * forcz,
                         const TypeType * type,
                         const RectangularBox box,
                         DeviceCellList clist,
                         const ScalorType rcut,
                         DeviceNeighborList nlist,
                         mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // number of neighbor-list entries recorded for my atom
  IndexType Nneighbor = 0;
  // my atom index; MaxIndexValue marks an unoccupied slot of this cell
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared staging area: neighbor-cell atom indexes, coordinates and types
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rlist2 = nlist.rlist * nlist.rlist;
  ScalorType rcut2 = rcut * rcut;
  // loop-invariant single-cell flags (minimum image needed along that axis)
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    // uniform loop bound (depends only on bid): every thread reaches this
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the occupied slots of the neighbor cell
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
        // force index is needed for the neighbor-list record as well, so it
        // is computed before the distance tests
        IndexType fidx(0);
        fidx = AtomNBForceTable::calForceIndex (
            const_nonBondedInteractionTable,
            const_numAtomType[0],
            reftype,
            targettype[jj]);
        if (dr2 < rcut2 &&
            targetIndexes[jj] != ii){
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
        if (dr2 < rlist2 &&
            targetIndexes[jj] != ii){
          // never write past the list capacity; keep counting so the
          // overflow is still detected and reported below
          if (Nneighbor < nlist.listLength){
            IndexType listIdx = Nneighbor * nlist.stride + ii;
            nlist.data[listIdx] = targetIndexes[jj];
            nlist.forceIndex[listIdx] = fidx;
          }
          Nneighbor ++;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    if (Nneighbor > nlist.listLength && ptr_de != NULL){
      *ptr_de = mdErrorShortNeighborList;
      return;
    }
    nlist.Nneighbor[ii] = Nneighbor;
  }
}
// Cell-list sweep that evaluates non-bonded forces (accumulated +=) plus
// per-atom statistics (potential in buff0, virial diagonal in buff1..buff3,
// overwritten and scaled by 0.5f for pair double counting) and at the same
// time (re)builds the neighbor list for pairs within nlist.rlist.
//
// Fix: the original wrote nlist.data / nlist.forceIndex BEFORE checking the
// capacity nlist.listLength, so an overfull neighbor list corrupted memory
// past the end of the list before the error was reported.  Writes are now
// skipped once the list is full, while Nneighbor keeps counting so the
// overflow is still reported through ptr_de exactly as before.
//
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType))
__global__ void
calNonBondedInteraction (const IndexType numAtom,
                         const CoordType * coord,
                         ScalorType * forcx,
                         ScalorType * forcy,
                         ScalorType * forcz,
                         const TypeType * type,
                         const RectangularBox box,
                         DeviceCellList clist,
                         const ScalorType rcut,
                         DeviceNeighborList nlist,
                         ScalorType * statistic_nb_buff0,
                         ScalorType * statistic_nb_buff1,
                         ScalorType * statistic_nb_buff2,
                         ScalorType * statistic_nb_buff3,
                         mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // number of neighbor-list entries recorded for my atom
  IndexType Nneighbor = 0;
  // my atom index; MaxIndexValue marks an unoccupied slot of this cell
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared staging area: neighbor-cell atom indexes, coordinates and types
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rlist2 = nlist.rlist * nlist.rlist;
  ScalorType rcut2 = rcut * rcut;
  // loop-invariant single-cell flags (minimum image needed along that axis)
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    // uniform loop bound (depends only on bid): every thread reaches this
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the occupied slots of the neighbor cell
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
        // force index is needed for the neighbor-list record as well, so it
        // is computed before the distance tests
        IndexType fidx(0);
        fidx = AtomNBForceTable::calForceIndex (
            const_nonBondedInteractionTable,
            const_numAtomType[0],
            reftype,
            targettype[jj]);
        if (dr2 < rcut2 &&
            targetIndexes[jj] != ii){
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
        if (dr2 < rlist2 &&
            targetIndexes[jj] != ii){
          // never write past the list capacity; keep counting so the
          // overflow is still detected and reported below
          if (Nneighbor < nlist.listLength){
            IndexType listIdx = Nneighbor * nlist.stride + ii;
            nlist.data[listIdx] = targetIndexes[jj];
            nlist.forceIndex[listIdx] = fidx;
          }
          Nneighbor ++;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    // statistics are overwritten; 0.5f compensates double counting of pairs
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
    if (Nneighbor > nlist.listLength && ptr_de != NULL){
      *ptr_de = mdErrorShortNeighborList;
      return;
    }
    nlist.Nneighbor[ii] = Nneighbor;
  }
}
// All-pairs O(N^2) non-bonded force evaluation (no cell list): atoms are
// processed in tiles of blockDim.x staged through shared memory, and every
// distinct pair within rcut contributes.  Forces are ACCUMULATED (+=).
// The redundant inner re-test of dr2 < rcut2 (already guaranteed by the
// enclosing condition) has been removed.
//
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(CoordType) + sizeof(TypeType))
__global__ void
calNonBondedInteraction_all (const IndexType numAtom,
                             const CoordType * coord,
                             ScalorType * forcx,
                             ScalorType * forcy,
                             ScalorType * forcz,
                             const TypeType * type,
                             const RectangularBox box,
                             const ScalorType rcut,
                             mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType numberAtom = numAtom;
  // global index of the atom owned by this thread
  IndexType ii = tid + bid * blockDim.x;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  // shared staging area for one tile of target coordinates and types
  extern __shared__ volatile char pub_sbuff[];
  volatile CoordType * target =
      (volatile CoordType *) pub_sbuff;
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  __syncthreads();
  CoordType ref;
  TypeType reftype;
  if (ii < numberAtom){
    ref = coord[ii];
    reftype = type[ii];
  }
  ScalorType rcut2 = rcut * rcut;
  // sweep all atoms tile by tile; every thread of the block participates in
  // the staging barriers even when its own atom index is out of range
  for (IndexType targetBlockId = 0;
       targetBlockId * blockDim.x < numberAtom; ++targetBlockId){
    IndexType jj = tid + targetBlockId * blockDim.x;
    __syncthreads();
    if (jj < numberAtom){
      target[tid].x = coord[jj].x;
      target[tid].y = coord[jj].y;
      target[tid].z = coord[jj].z;
      targettype[tid] = type[jj];
    }
    __syncthreads();
    if (ii < numberAtom){
      for (IndexType kk = 0; kk < blockDim.x; ++kk){
        // past the last atom of a partial tile: nothing more to do
        if (kk + targetBlockId * blockDim.x >= numberAtom) break;
        ScalorType diffx = target[kk].x - ref.x;
        ScalorType diffy = target[kk].y - ref.y;
        ScalorType diffz = target[kk].z - ref.z;
        // minimum-image convention in the rectangular box
        shortestImage (box, &diffx, &diffy, &diffz);
        ScalorType dr2 = diffx*diffx + diffy*diffy + diffz*diffz;
        if (dr2 < rcut2 &&
            kk + targetBlockId * blockDim.x != ii){
          IndexType fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[kk]);
          // potential dp is computed but unused in this force-only variant
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii < numberAtom){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
  }
}
// All-pairs O(N^2) non-bonded force evaluation with per-atom statistics:
// forces are ACCUMULATED (+=); potential (buff0) and virial diagonal
// (buff1..buff3) are OVERWRITTEN (=), scaled by 0.5f since each pair is
// visited from both partners.  The redundant inner re-test of dr2 < rcut2
// (already guaranteed by the enclosing condition) has been removed.
//
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(CoordType) + sizeof(TypeType))
__global__ void
calNonBondedInteraction_all (const IndexType numAtom,
                             const CoordType * coord,
                             ScalorType * forcx,
                             ScalorType * forcy,
                             ScalorType * forcz,
                             const TypeType * type,
                             const RectangularBox box,
                             const ScalorType rcut,
                             ScalorType * statistic_nb_buff0,
                             ScalorType * statistic_nb_buff1,
                             ScalorType * statistic_nb_buff2,
                             ScalorType * statistic_nb_buff3,
                             mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType numberAtom = numAtom;
  // global index of the atom owned by this thread
  IndexType ii = tid + bid * blockDim.x;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  // shared staging area for one tile of target coordinates and types
  extern __shared__ volatile char pub_sbuff[];
  volatile CoordType * target =
      (volatile CoordType *) pub_sbuff;
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  __syncthreads();
  CoordType ref;
  TypeType reftype;
  if (ii < numberAtom){
    ref = coord[ii];
    reftype = type[ii];
  }
  ScalorType rcut2 = rcut * rcut;
  // sweep all atoms tile by tile; every thread of the block participates in
  // the staging barriers even when its own atom index is out of range
  for (IndexType targetBlockId = 0;
       targetBlockId * blockDim.x < numberAtom; ++targetBlockId){
    IndexType jj = tid + targetBlockId * blockDim.x;
    __syncthreads();
    if (jj < numberAtom){
      target[tid].x = coord[jj].x;
      target[tid].y = coord[jj].y;
      target[tid].z = coord[jj].z;
      targettype[tid] = type[jj];
    }
    __syncthreads();
    if (ii < numberAtom){
      for (IndexType kk = 0; kk < blockDim.x; ++kk){
        // past the last atom of a partial tile: nothing more to do
        if (kk + targetBlockId * blockDim.x >= numberAtom) break;
        ScalorType diffx = target[kk].x - ref.x;
        ScalorType diffy = target[kk].y - ref.y;
        ScalorType diffz = target[kk].z - ref.z;
        // minimum-image convention in the rectangular box
        shortestImage (box, &diffx, &diffy, &diffz);
        ScalorType dr2 = diffx*diffx + diffy*diffy + diffz*diffz;
        if (dr2 < rcut2 &&
            kk + targetBlockId * blockDim.x != ii){
          IndexType fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[kk]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii < numberAtom){
    forcx[ii] += fsumx;
    forcy[ii] += fsumy;
    forcz[ii] += fsumz;
    // statistics are overwritten; 0.5f compensates double counting of pairs
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// Twin-range correction via the cell list: only pairs whose squared
// distance lies in [rcut1^2, rcut2^2) contribute.  Unlike the plain force
// kernels, forces and statistics here are OVERWRITTEN (=), so the caller
// can keep the correction in separate buffers.  Statistics are scaled by
// 0.5f since each pair is visited from both partners.
//
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType))
__global__ void
calTwinRangeCorrection_cell (const IndexType numAtom,
                             const CoordType * coord,
                             ScalorType * forcx,
                             ScalorType * forcy,
                             ScalorType * forcz,
                             const TypeType * type,
                             const RectangularBox box,
                             const DeviceCellList clist,
                             const ScalorType rcut1,
                             const ScalorType rcut2,
                             ScalorType * statistic_nb_buff0,
                             ScalorType * statistic_nb_buff1,
                             ScalorType * statistic_nb_buff2,
                             ScalorType * statistic_nb_buff3,
                             mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // my atom index; MaxIndexValue marks an unoccupied slot of this cell
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  CoordType ref;
  TypeType reftype;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_interaction_coord, ii);
    reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
  }
  // shared staging area: neighbor-cell atom indexes, coordinates and types
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  ScalorType rcut12 = rcut1 * rcut1;
  ScalorType rcut22 = rcut2 * rcut2;
  // hoisted out of the neighbor-cell loop: these flags depend only on the
  // fixed cell-grid dimensions, so recomputing them per neighbor cell was
  // wasted work
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    // uniform loop bound (depends only on bid): every thread reaches this
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
    CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
    }
    __syncthreads();
    // scan the occupied slots of the neighbor cell
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
        ScalorType diffx = target[jj].x - shift.x - ref.x;
        ScalorType diffy = target[jj].y - shift.y - ref.y;
        ScalorType diffz = target[jj].z - shift.z - ref.z;
        if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
        if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
        if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
        ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
        // only the twin-range shell [rcut1, rcut2) contributes
        if (dr2 < rcut22 && dr2 >= rcut12 &&
            targetIndexes[jj] != ii){
          IndexType fidx(0);
          fidx = AtomNBForceTable::calForceIndex (
              const_nonBondedInteractionTable,
              const_numAtomType[0],
              reftype,
              targettype[jj]);
          ScalorType fx, fy, fz, dp;
          nbForcePoten (nonBondedInteractionType[fidx],
                        &nonBondedInteractionParameter
                        [nonBondedInteractionParameterPosition[fidx]],
                        diffx, diffy, diffz,
                        &fx, &fy, &fz, &dp);
          myPoten += dp;
          myVxx += fx * diffx;
          myVyy += fy * diffy;
          myVzz += fz * diffz;
          fsumx += fx;
          fsumy += fy;
          fsumz += fz;
        }
      }
    }
  }
  if (ii != MaxIndexValue){
    // overwrite, not accumulate (see header note)
    forcx[ii] = fsumx;
    forcy[ii] = fsumy;
    forcz[ii] = fsumz;
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
// All-pairs O(N^2) twin-range correction: accumulates force, potential and
// virial contributions only for pairs whose squared distance lies in
// [rcut1^2, rcut2^2) -- the outer shell of the twin-range scheme.
// NOTE(review): results are WRITTEN (=) to forcx/forcy/forcz and to the four
// statistic buffers, not accumulated -- presumably callers keep this
// correction separate from the short-range forces; confirm before reusing.
// Dynamic shared-memory requirement (3rd launch argument):
//   roundUp4(blockDim.x) * (sizeof(CoordType) + sizeof(TypeType))
__global__ void
calTwinRangeCorrection_all (const IndexType numAtom,
                            const CoordType * coord,
                            ScalorType * forcx,
                            ScalorType * forcy,
                            ScalorType * forcz,
                            const TypeType * type,
                            const RectangularBox box,
                            const ScalorType rcut1,
                            const ScalorType rcut2,
                            ScalorType * statistic_nb_buff0,
                            ScalorType * statistic_nb_buff1,
                            ScalorType * statistic_nb_buff2,
                            ScalorType * statistic_nb_buff3,
                            mdError_t * ptr_de)
{
  // RectangularBoxGeometry::normalizeSystem (box, &ddata);
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType numberAtom = numAtom;
  // global index of the atom owned by this thread
  IndexType ii = tid + bid * blockDim.x;
  ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
  ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
  // shared staging area for one tile of blockDim.x target atoms
  extern __shared__ volatile char pub_sbuff[];
  volatile CoordType * target =
    (volatile CoordType *) pub_sbuff;
  volatile TypeType * targettype =
    (volatile TypeType *) &target[roundUp4(blockDim.x)];
  __syncthreads();
  CoordType ref;
  TypeType reftype;
  if (ii < numberAtom){
    ref = coord[ii];
    reftype = type[ii];
  }
  ScalorType rcut12 = rcut1 * rcut1;
  ScalorType rcut22 = rcut2 * rcut2;
  // sweep all atoms tile by tile; every thread of the block participates in
  // the staging barriers even when its own atom index is out of range
  for (IndexType targetBlockId = 0;
       targetBlockId * blockDim.x < numberAtom; ++targetBlockId){
    IndexType jj = tid + targetBlockId * blockDim.x;
    __syncthreads();
    if (jj < numberAtom){
      // member-wise copy into the volatile shared tile
      target[tid].x = coord[jj].x;
      target[tid].y = coord[jj].y;
      target[tid].z = coord[jj].z;
      targettype[tid] = type[jj];
    }
    __syncthreads();
    if (ii < numberAtom){
      for (IndexType kk = 0; kk < blockDim.x; ++kk){
	// past the last atom of a partial tile: nothing more to do
	if (kk + targetBlockId * blockDim.x >= numberAtom) break;
	ScalorType diffx = target[kk].x - ref.x;
	ScalorType diffy = target[kk].y - ref.y;
	ScalorType diffz = target[kk].z - ref.z;
	// minimum-image convention in the rectangular box
	shortestImage (box, &diffx, &diffy, &diffz);
	ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz;
	// only the twin-range shell [rcut1, rcut2) contributes; self pairs skipped
	if (dr2 < rcut22 && dr2 >= rcut12 &&
	    kk + targetBlockId * blockDim.x != ii){
	  IndexType fidx = AtomNBForceTable::calForceIndex (
	    const_nonBondedInteractionTable,
	    const_numAtomType[0],
	    reftype,
	    targettype[kk]);
	  // if (fidx != mdForceNULL) {
	  ScalorType fx, fy, fz, dp;
	  nbForcePoten (nonBondedInteractionType[fidx],
			&nonBondedInteractionParameter
			[nonBondedInteractionParameterPosition[fidx]],
			diffx, diffy, diffz,
			&fx, &fy, &fz, &dp);
	  myPoten += dp;
	  myVxx += fx * diffx;
	  myVyy += fy * diffy;
	  myVzz += fz * diffz;
	  fsumx += fx;
	  fsumy += fy;
	  fsumz += fz;
	  // }
	}
      }
    }
  }
  if (ii < numberAtom){
    // overwrite, not accumulate (see header note); the 0.5f factor is
    // consistent with each pair being visited by both partners
    forcx[ii] = fsumx;
    forcy[ii] = fsumy;
    forcz[ii] = fsumz;
    statistic_nb_buff0[ii] = myPoten * 0.5f;
    statistic_nb_buff1[ii] = myVxx * 0.5f;
    statistic_nb_buff2[ii] = myVyy * 0.5f;
    statistic_nb_buff3[ii] = myVzz * 0.5f;
  }
}
__global__ void
buildNeighborListCalTwinRangeCorr_cell (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const TypeType * type,
const RectangularBox box,
const DeviceCellList clist,
const ScalorType rcut1,
const ScalorType rcut2,
DeviceNeighborList nlist,
ScalorType * statistic_nb_buff0,
ScalorType * statistic_nb_buff1,
ScalorType * statistic_nb_buff2,
ScalorType * statistic_nb_buff3,
mdError_t * ptr_de)
{
// RectangularBoxGeometry::normalizeSystem (box, &ddata);
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType bidx, bidy, bidz;
D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
// set number of neighbor to 0
IndexType Nneighbor = 0;
// load index
IndexType ii = getDeviceCellListData (clist, bid, tid);
// load iith coordinate // use texturefetch instead
CoordType ref;
TypeType reftype;
ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
ref = coord[ii];
reftype = type[ii];
#else
ref = tex1Dfetch (global_texRef_interaction_coord, ii);
reftype = tex1Dfetch(global_texRef_interaction_type, ii);
#endif
}
// the target index and coordinates are shared
extern __shared__ volatile char pub_sbuff[];
volatile IndexType * targetIndexes =
(volatile IndexType *) pub_sbuff;
CoordType * target =
(CoordType *) &targetIndexes[roundUp4(blockDim.x)];
volatile TypeType * targettype =
(volatile TypeType *) &target[roundUp4(blockDim.x)];
ScalorType rcut12 = rcut1 * rcut1;
ScalorType rcut22 = rcut2 * rcut2;
for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
__syncthreads();
IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
CoordType shift;
shift.x = shiftNoi.x * box.size.x;
shift.y = shiftNoi.y * box.size.y;
shift.z = shiftNoi.z * box.size.z;
targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
if (targetIndexes[tid] != MaxIndexValue){
target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]);
targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]);
}
bool oneCellX(false), oneCellY(false), oneCellZ(false);
if (clist.NCell.x == 1) oneCellX = true;
if (clist.NCell.y == 1) oneCellY = true;
if (clist.NCell.z == 1) oneCellZ = true;
__syncthreads();
// find neighbor
if (ii != MaxIndexValue){
for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
// if (targetIndexes[jj] == MaxIndexValue) break;
ScalorType diffx = target[jj].x - shift.x - ref.x;
ScalorType diffy = target[jj].y - shift.y - ref.y;
ScalorType diffz = target[jj].z - shift.z - ref.z;
if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
if (targetIndexes[jj] != ii){
if (dr2 < rcut22 && dr2 >= rcut12 ){
IndexType fidx(0);
fidx = AtomNBForceTable::calForceIndex (
const_nonBondedInteractionTable,
const_numAtomType[0],
reftype,
targettype[jj]);
// if (fidx != mdForceNULL) {
ScalorType fx, fy, fz, dp;
nbForcePoten (nonBondedInteractionType[fidx],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[fidx]],
diffx, diffy, diffz,
&fx, &fy, &fz, &dp);
// printf ("# %d\t%d\t%f\t%f\t%f\n",
// ii, targetIndexes[jj],
// ref.z, target[jj].z, fz);
myPoten += dp;
myVxx += fx * diffx;
myVyy += fy * diffy;
myVzz += fz * diffz;
fsumx += fx;
fsumy += fy;
fsumz += fz;
// }
}
else if (dr2 < rcut12){
IndexType fidx(0);
fidx = AtomNBForceTable::calForceIndex (
const_nonBondedInteractionTable,
const_numAtomType[0],
reftype,
targettype[jj]);
IndexType listIdx = Nneighbor * nlist.stride + ii;
nlist.data[listIdx] = targetIndexes[jj];
nlist.forceIndex[listIdx] = fidx;
Nneighbor ++;
}
}
}
}
}
if (ii != MaxIndexValue){
forcx[ii] = fsumx;
forcy[ii] = fsumy;
forcz[ii] = fsumz;
statistic_nb_buff0[ii] = myPoten * 0.5f;
statistic_nb_buff1[ii] = myVxx * 0.5f;
statistic_nb_buff2[ii] = myVyy * 0.5f;
statistic_nb_buff3[ii] = myVzz * 0.5f;
if (Nneighbor > nlist.listLength && ptr_de != NULL){
*ptr_de = mdErrorShortNeighborList;
}
nlist.Nneighbor[ii] = Nneighbor;
}
}
__global__ void
buildNeighborListCalTwinRangeCorr_all (const IndexType numAtom,
const CoordType * coord,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
const TypeType * type,
const RectangularBox box,
const ScalorType rcut1,
const ScalorType rcut2,
DeviceNeighborList nlist,
ScalorType * statistic_nb_buff0,
ScalorType * statistic_nb_buff1,
ScalorType * statistic_nb_buff2,
ScalorType * statistic_nb_buff3,
mdError_t * ptr_de)
{
// RectangularBoxGeometry::normalizeSystem (box, &ddata);
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType numberAtom = numAtom;
IndexType Nneighbor = 0;
IndexType ii = tid + bid * blockDim.x;
ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f);
ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f);
extern __shared__ volatile char pub_sbuff[];
volatile CoordType * target =
(volatile CoordType *) pub_sbuff;
volatile TypeType * targettype =
(volatile TypeType *) &target[roundUp4(blockDim.x)];
__syncthreads();
CoordType ref;
TypeType reftype;
if (ii < numberAtom){
ref = coord[ii];
reftype = type[ii];
}
ScalorType rcut12 = rcut1 * rcut1;
ScalorType rcut22 = rcut2 * rcut2;
for (IndexType targetBlockId = 0;
targetBlockId * blockDim.x < numberAtom; ++targetBlockId){
IndexType jj = tid + targetBlockId * blockDim.x;
__syncthreads();
if (jj < numberAtom){
target[tid].x = coord[jj].x;
target[tid].y = coord[jj].y;
target[tid].z = coord[jj].z;
targettype[tid] = type[jj];
}
__syncthreads();
if (ii < numberAtom){
for (IndexType kk = 0; kk < blockDim.x; ++kk){
if (kk + targetBlockId * blockDim.x >= numberAtom) break;
ScalorType diffx = target[kk].x - ref.x;
ScalorType diffy = target[kk].y - ref.y;
ScalorType diffz = target[kk].z - ref.z;
shortestImage (box, &diffx, &diffy, &diffz);
ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz;
if (kk + targetBlockId * blockDim.x != ii){
if (dr2 < rcut22 && dr2 >= rcut12 ){
IndexType fidx = AtomNBForceTable::calForceIndex (
const_nonBondedInteractionTable,
const_numAtomType[0],
reftype,
targettype[kk]);
// if (fidx != mdForceNULL) {
ScalorType fx, fy, fz, dp;
nbForcePoten (nonBondedInteractionType[fidx],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[fidx]],
diffx, diffy, diffz,
&fx, &fy, &fz, &dp);
myPoten += dp;
myVxx += fx * diffx;
myVyy += fy * diffy;
myVzz += fz * diffz;
fsumx += fx;
fsumy += fy;
fsumz += fz;
// }
}
else if (dr2 < rcut12){
IndexType fidx = AtomNBForceTable::calForceIndex (
const_nonBondedInteractionTable,
const_numAtomType[0],
reftype,
targettype[kk]);
IndexType listIdx = Nneighbor * nlist.stride + ii;
nlist.data[listIdx] = kk + targetBlockId * blockDim.x;
nlist.forceIndex[listIdx] = fidx;
Nneighbor ++;
}
}
}
}
}
if (ii < numberAtom){
forcx[ii] = fsumx;
forcy[ii] = fsumy;
forcz[ii] = fsumz;
statistic_nb_buff0[ii] = myPoten * 0.5f;
statistic_nb_buff1[ii] = myVxx * 0.5f;
statistic_nb_buff2[ii] = myVyy * 0.5f;
statistic_nb_buff3[ii] = myVzz * 0.5f;
if (Nneighbor > nlist.listLength && ptr_de != NULL){
*ptr_de = mdErrorShortNeighborList;
}
nlist.Nneighbor[ii] = Nneighbor;
}
}
__global__ void
widomDeltaPoten_NVT (const IndexType numTestParticle,
const CoordType * coordTestParticle,
const TypeType * typeTestParticle,
const IndexType numAtom,
const CoordType * coord,
const TypeType * type,
const RectangularBox box,
DeviceCellList clist,
ScalorType * statistic_nb_buff0,
mdError_t * ptr_de)
{
// RectangularBoxGeometry::normalizeSystem (box, &ddata);
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// IndexType ii = tid + bid * blockDim.x;
if (bid >= numTestParticle) return;
// extern __shared__ volatile char pub_sbuff_widom[];
// volatile ScalorType * sumbuff = (volatile ScalorType *) pub_sbuff_widom;
extern __shared__ volatile ScalorType sumbuff [];
CoordType refCoord = coordTestParticle[bid];
TypeType refType = typeTestParticle[bid];
ScalorType myPoten (0.0f);
IndexType refCelli, refCellj, refCellk;
refCelli = IndexType (refCoord.x * box.sizei.x * ScalorType(clist.NCell.x));
refCellj = IndexType (refCoord.y * box.sizei.y * ScalorType(clist.NCell.y));
refCellk = IndexType (refCoord.z * box.sizei.z * ScalorType(clist.NCell.z));
if (refCelli == clist.NCell.x){
refCelli -= clist.NCell.x;
refCoord.x -= box.size.x;
}
if (refCellj == clist.NCell.y){
refCellj -= clist.NCell.y;
refCoord.y -= box.size.y;
}
if (refCellk == clist.NCell.z){
refCellk -= clist.NCell.z;
refCoord.z -= box.size.z;
}
IndexType refCellIndex = D3toD1 (clist.NCell, refCelli, refCellj, refCellk);
for (IndexType i = 0; i < clist.numNeighborCell[refCellIndex]; ++i){
__syncthreads ();
IndexType targetCellIdx = getNeighborCellIndex (clist, refCellIndex, i);
CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, refCellIndex, i);
CoordType shift;
shift.x = shiftNoi.x * box.size.x;
shift.y = shiftNoi.y * box.size.y;
shift.z = shiftNoi.z * box.size.z;
IndexType targetIndex = getDeviceCellListData(clist, targetCellIdx, tid);
if (targetIndex != MaxIndexValue){
TypeType targettype = tex1Dfetch(global_texRef_interaction_type, targetIndex);
if (refType == targettype){
CoordType targetCoord = tex1Dfetch(global_texRef_interaction_coord, targetIndex);
ScalorType diffx = targetCoord.x - shift.x - refCoord.x;
ScalorType diffy = targetCoord.y - shift.y - refCoord.y;
ScalorType diffz = targetCoord.z - shift.z - refCoord.z;
ScalorType dr2 = ((diffx*diffx+diffy*diffy+diffz*diffz));
if (dr2 < clist.rlist*clist.rlist && dr2 > 1e-4){
IndexType fidx(0);
ScalorType dp;
fidx = AtomNBForceTable::
calForceIndex (const_nonBondedInteractionTable,
const_numAtomType[0],
refType,
refType);
nbPoten (nonBondedInteractionType[fidx],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[fidx]],
diffx, diffy, diffz, &dp);
myPoten += dp;
// printf ("dp: %f, %f %f %f\n", dp, diffx, diffy, diffz);
}
}
}
}
sumbuff[tid] = myPoten;
__syncthreads();
sumVectorBlockBuffer_2 (sumbuff);
__syncthreads();
if (tid == 0){
statistic_nb_buff0[bid] = sumbuff[0];
}
}
// if (tid == 0){
// // printf ("### du is %f\n", sumbuff[0]);
// statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature);
// }
// }
__global__ void
widomDeltaPoten_allPair_NVT (const IndexType numTestParticle,
const CoordType * coordTestParticle,
const TypeType * typeTestParticle,
const IndexType numAtom,
const CoordType * coord,
const TypeType * type,
const RectangularBox box,
const ScalorType rlist,
ScalorType * statistic_nb_buff0,
mdError_t * ptr_de)
{
// RectangularBoxGeometry::normalizeSystem (box, &ddata);
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// IndexType ii = tid + bid * blockDim.x;
if (bid >= numTestParticle) return;
CoordType refCoord = coordTestParticle[bid];
TypeType refType = typeTestParticle[bid];
ScalorType myPoten = 0.;
extern __shared__ volatile ScalorType sumbuff [];
for (IndexType start = 0; start < numAtom; start += blockDim.x){
IndexType targetIndex = start + tid;
if (targetIndex >= numAtom) break;
TypeType targetType = type[targetIndex];
if (targetType != refType) continue;
CoordType targetCoord = coord[targetIndex];
ScalorType diffx = targetCoord.x - refCoord.x;
ScalorType diffy = targetCoord.y - refCoord.y;
ScalorType diffz = targetCoord.z - refCoord.z;
RectangularBoxGeometry::shortestImage (box, &diffx, &diffy, &diffz);
ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz);
if (dr2 < rlist * rlist && dr2 > 1e-4 ){
IndexType fidx(0);
ScalorType dp;
fidx = AtomNBForceTable::
calForceIndex (const_nonBondedInteractionTable,
const_numAtomType[0],
refType,
refType);
nbPoten (nonBondedInteractionType[fidx],
&nonBondedInteractionParameter
[nonBondedInteractionParameterPosition[fidx]],
diffx, diffy, diffz, &dp);
myPoten += dp;
}
}
sumbuff[tid] = myPoten;
__syncthreads();
sumVectorBlockBuffer_2 (sumbuff);
__syncthreads();
if (tid == 0){
statistic_nb_buff0[bid] = sumbuff[0];
}
}
// if (tid == 0){
// // printf ("### du is %f\n", sumbuff[0]);
// statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature);
// }
// }
|
c71516f9e5d4cd1d2e70f3e80e2f0a8989c8d9db.hip | // !!! This is a file automatically generated by hipify!!!
/* copyright 2017 MapD Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* @file TopKSort.cu
* @author Minggang Yu <miyu@mapd.com>
* @brief Top-k sorting on streaming top-k heaps on VRAM
*
* Copyright (c) 2017 MapD Technologies, Inc. All rights reserved.
*/
#include "BufferEntryUtils.h"
#include "GpuMemUtils.h"
#include "ResultSetBufferAccessors.h"
#include "SortUtils.cuh"
#include "StreamingTopN.h"
#include "TopKSort.h"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/partition.h>
#include <thrust/sort.h>
#include <iostream>
template <class K, class I = int32_t>
struct is_taken_entry {
is_taken_entry(const int8_t* buff, const size_t stride)
: buff_ptr(buff), key_stride(stride) {}
__host__ __device__ bool operator()(const I index) {
return !is_empty_entry<K>(static_cast<size_t>(index), buff_ptr, key_stride);
}
const int8_t* buff_ptr;
const size_t key_stride;
};
template <class K, class I = int32_t>
struct is_null_order_entry {
typedef I argument_type;
is_null_order_entry(const int8_t* base, const size_t stride, const int64_t nul)
: oe_base(base), oe_stride(stride), null_val(nul) {}
__host__ __device__ bool operator()(const I index) {
const auto oe_val = *reinterpret_cast<const K*>(oe_base + index * oe_stride);
switch (sizeof(K)) {
case 4:
return *reinterpret_cast<const int32_t*>(&oe_val) ==
static_cast<int32_t>(null_val);
case 8:
return *reinterpret_cast<const int64_t*>(&oe_val) == null_val;
default:
return false;
}
}
const int8_t* oe_base;
const size_t oe_stride;
const int64_t null_val;
};
template <typename ForwardIterator>
ForwardIterator partition_by_null(ForwardIterator first,
ForwardIterator last,
const int64_t null_val,
const bool nulls_first,
const int8_t* rows_ptr,
const GroupByBufferLayoutInfo& layout) {
if (nulls_first) {
return (layout.col_bytes == 4)
? thrust::partition(
first,
last,
is_null_order_entry<int32_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val))
: thrust::partition(
first,
last,
is_null_order_entry<int64_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val));
} else {
return (layout.col_bytes == 4)
? thrust::partition(
first,
last,
thrust::not1(is_null_order_entry<int32_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val)))
: thrust::partition(
first,
last,
thrust::not1(is_null_order_entry<int64_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val)));
}
}
template <class K, class I>
struct KeyFetcher {
KeyFetcher(K* out_base,
const int8_t* src_oe_base,
const size_t stride,
const I* indices)
: key_base(out_base), oe_base(src_oe_base), oe_stride(stride), idx_base(indices) {}
__host__ __device__ void operator()(const I index) {
key_base[index] = *reinterpret_cast<const K*>(oe_base + idx_base[index] * oe_stride);
}
K* key_base;
const int8_t* oe_base;
const size_t oe_stride;
const I* idx_base;
};
template <class K>
struct KeyReseter {
KeyReseter(int8_t* out_base, const size_t stride, const K emp_key)
: rows_base(out_base), key_stride(stride), empty_key(emp_key) {}
__host__ __device__ void operator()(const size_t index) {
K* key_ptr = reinterpret_cast<K*>(rows_base + index * key_stride);
*key_ptr = empty_key;
}
int8_t* rows_base;
const size_t key_stride;
const K empty_key;
};
// TODO(miyu) : switch to shared version in ResultSetSortImpl.cu.
template <class K, class I>
void collect_order_entry_column(thrust::device_ptr<K>& d_oe_col_buffer,
const int8_t* d_src_buffer,
const thrust::device_ptr<I>& d_idx_first,
const size_t idx_count,
const size_t oe_offset,
const size_t oe_stride) {
thrust::for_each(thrust::make_counting_iterator(size_t(0)),
thrust::make_counting_iterator(idx_count),
KeyFetcher<K, I>(thrust::raw_pointer_cast(d_oe_col_buffer),
d_src_buffer + oe_offset,
oe_stride,
thrust::raw_pointer_cast(d_idx_first)));
}
template <class K, class I>
void sort_indices_by_key(thrust::device_ptr<I> d_idx_first,
const size_t idx_count,
const thrust::device_ptr<K>& d_key_buffer,
const bool desc,
ThrustAllocator& allocator) {
if (desc) {
thrust::sort_by_key(thrust::device(allocator),
d_key_buffer,
d_key_buffer + idx_count,
d_idx_first,
thrust::greater<K>());
} else {
thrust::sort_by_key(
thrust::device(allocator), d_key_buffer, d_key_buffer + idx_count, d_idx_first);
}
}
template <class I = int32_t>
void do_radix_sort(thrust::device_ptr<I> d_idx_first,
const size_t idx_count,
const int8_t* d_src_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
ThrustAllocator& allocator) {
const auto& oe_type = layout.oe_target_info.sql_type;
if (oe_type.is_fp()) {
switch (layout.col_bytes) {
case 4: {
auto d_oe_buffer = get_device_ptr<float>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
case 8: {
auto d_oe_buffer = get_device_ptr<double>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
default:
CHECK(false);
}
return;
}
CHECK(oe_type.is_number() || oe_type.is_time());
switch (layout.col_bytes) {
case 4: {
auto d_oe_buffer = get_device_ptr<int32_t>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
case 8: {
auto d_oe_buffer = get_device_ptr<int64_t>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
default:
CHECK(false);
}
}
template <class I>
struct RowFetcher {
RowFetcher(int8_t* out_base,
const int8_t* in_base,
const I* indices,
const size_t row_sz)
: dst_base(out_base), src_base(in_base), idx_base(indices), row_size(row_sz) {}
__host__ __device__ void operator()(const I index) {
memcpy(dst_base + index * row_size, src_base + idx_base[index] * row_size, row_size);
}
int8_t* dst_base;
const int8_t* src_base;
const I* idx_base;
const size_t row_size;
};
template <typename DerivedPolicy>
void reset_keys_in_row_buffer(
const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
int8_t* row_buffer,
const size_t key_width,
const size_t row_size,
const size_t first,
const size_t last) {
switch (key_width) {
case 4:
thrust::for_each(
exec,
thrust::make_counting_iterator(first),
thrust::make_counting_iterator(last),
KeyReseter<int32_t>(row_buffer, row_size, static_cast<int32_t>(EMPTY_KEY_32)));
break;
case 8:
thrust::for_each(
exec,
thrust::make_counting_iterator(first),
thrust::make_counting_iterator(last),
KeyReseter<int64_t>(row_buffer, row_size, static_cast<int64_t>(EMPTY_KEY_64)));
break;
default:
CHECK(false);
}
}
std::vector<int8_t> pop_n_rows_from_merged_heaps_gpu(
Data_Namespace::DataMgr* data_mgr,
const int64_t* dev_heaps,
const size_t heaps_size,
const size_t n,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t group_key_bytes,
const size_t thread_count,
const int device_id) {
const auto row_size = layout.row_bytes;
CHECK_EQ(heaps_size, streaming_top_n::get_heap_size(row_size, n, thread_count));
const int8_t* rows_ptr = reinterpret_cast<const int8_t*>(dev_heaps) +
streaming_top_n::get_rows_offset_of_heaps(n, thread_count);
const auto total_entry_count = n * thread_count;
ThrustAllocator thrust_allocator(data_mgr, device_id);
auto d_indices = get_device_ptr<int32_t>(total_entry_count, thrust_allocator);
thrust::sequence(d_indices, d_indices + total_entry_count);
auto separator = (group_key_bytes == 4)
? thrust::partition(d_indices,
d_indices + total_entry_count,
is_taken_entry<int32_t>(rows_ptr, row_size))
: thrust::partition(d_indices,
d_indices + total_entry_count,
is_taken_entry<int64_t>(rows_ptr, row_size));
const size_t actual_entry_count = separator - d_indices;
if (!actual_entry_count) {
std::vector<int8_t> top_rows(row_size * n);
reset_keys_in_row_buffer(
thrust::host, &top_rows[0], layout.col_bytes, row_size, 0, n);
return top_rows;
}
const auto& oe_type = layout.oe_target_info.sql_type;
if (oe_type.get_notnull()) {
do_radix_sort(d_indices, actual_entry_count, rows_ptr, oe, layout, thrust_allocator);
} else {
auto separator = partition_by_null(d_indices,
d_indices + actual_entry_count,
null_val_bit_pattern(oe_type, false),
oe.nulls_first,
rows_ptr,
layout);
if (oe.nulls_first) {
const size_t null_count = separator - d_indices;
if (null_count < actual_entry_count) {
do_radix_sort(separator,
actual_entry_count - null_count,
rows_ptr,
oe,
layout,
thrust_allocator);
}
} else {
const size_t nonnull_count = separator - d_indices;
if (nonnull_count > 0) {
do_radix_sort(d_indices, nonnull_count, rows_ptr, oe, layout, thrust_allocator);
}
}
}
const auto final_entry_count = ::min(n, actual_entry_count);
auto d_top_rows = get_device_ptr<int8_t>(row_size * n, thrust_allocator);
thrust::for_each(thrust::make_counting_iterator(size_t(0)),
thrust::make_counting_iterator(final_entry_count),
RowFetcher<int32_t>(thrust::raw_pointer_cast(d_top_rows),
rows_ptr,
thrust::raw_pointer_cast(d_indices),
row_size));
if (final_entry_count < n) {
reset_keys_in_row_buffer(thrust::device,
thrust::raw_pointer_cast(d_top_rows),
layout.col_bytes,
row_size,
final_entry_count,
n);
}
std::vector<int8_t> top_rows(row_size * n);
thrust::copy(d_top_rows, d_top_rows + row_size * n, top_rows.begin());
return top_rows;
}
| c71516f9e5d4cd1d2e70f3e80e2f0a8989c8d9db.cu | /* copyright 2017 MapD Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* @file TopKSort.cu
* @author Minggang Yu <miyu@mapd.com>
* @brief Top-k sorting on streaming top-k heaps on VRAM
*
* Copyright (c) 2017 MapD Technologies, Inc. All rights reserved.
*/
#include "BufferEntryUtils.h"
#include "GpuMemUtils.h"
#include "ResultSetBufferAccessors.h"
#include "SortUtils.cuh"
#include "StreamingTopN.h"
#include "TopKSort.h"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/partition.h>
#include <thrust/sort.h>
#include <iostream>
template <class K, class I = int32_t>
struct is_taken_entry {
is_taken_entry(const int8_t* buff, const size_t stride)
: buff_ptr(buff), key_stride(stride) {}
__host__ __device__ bool operator()(const I index) {
return !is_empty_entry<K>(static_cast<size_t>(index), buff_ptr, key_stride);
}
const int8_t* buff_ptr;
const size_t key_stride;
};
template <class K, class I = int32_t>
struct is_null_order_entry {
typedef I argument_type;
is_null_order_entry(const int8_t* base, const size_t stride, const int64_t nul)
: oe_base(base), oe_stride(stride), null_val(nul) {}
__host__ __device__ bool operator()(const I index) {
const auto oe_val = *reinterpret_cast<const K*>(oe_base + index * oe_stride);
switch (sizeof(K)) {
case 4:
return *reinterpret_cast<const int32_t*>(&oe_val) ==
static_cast<int32_t>(null_val);
case 8:
return *reinterpret_cast<const int64_t*>(&oe_val) == null_val;
default:
return false;
}
}
const int8_t* oe_base;
const size_t oe_stride;
const int64_t null_val;
};
template <typename ForwardIterator>
ForwardIterator partition_by_null(ForwardIterator first,
ForwardIterator last,
const int64_t null_val,
const bool nulls_first,
const int8_t* rows_ptr,
const GroupByBufferLayoutInfo& layout) {
if (nulls_first) {
return (layout.col_bytes == 4)
? thrust::partition(
first,
last,
is_null_order_entry<int32_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val))
: thrust::partition(
first,
last,
is_null_order_entry<int64_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val));
} else {
return (layout.col_bytes == 4)
? thrust::partition(
first,
last,
thrust::not1(is_null_order_entry<int32_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val)))
: thrust::partition(
first,
last,
thrust::not1(is_null_order_entry<int64_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val)));
}
}
template <class K, class I>
struct KeyFetcher {
KeyFetcher(K* out_base,
const int8_t* src_oe_base,
const size_t stride,
const I* indices)
: key_base(out_base), oe_base(src_oe_base), oe_stride(stride), idx_base(indices) {}
__host__ __device__ void operator()(const I index) {
key_base[index] = *reinterpret_cast<const K*>(oe_base + idx_base[index] * oe_stride);
}
K* key_base;
const int8_t* oe_base;
const size_t oe_stride;
const I* idx_base;
};
template <class K>
struct KeyReseter {
KeyReseter(int8_t* out_base, const size_t stride, const K emp_key)
: rows_base(out_base), key_stride(stride), empty_key(emp_key) {}
__host__ __device__ void operator()(const size_t index) {
K* key_ptr = reinterpret_cast<K*>(rows_base + index * key_stride);
*key_ptr = empty_key;
}
int8_t* rows_base;
const size_t key_stride;
const K empty_key;
};
// TODO(miyu) : switch to shared version in ResultSetSortImpl.cu.
template <class K, class I>
void collect_order_entry_column(thrust::device_ptr<K>& d_oe_col_buffer,
const int8_t* d_src_buffer,
const thrust::device_ptr<I>& d_idx_first,
const size_t idx_count,
const size_t oe_offset,
const size_t oe_stride) {
thrust::for_each(thrust::make_counting_iterator(size_t(0)),
thrust::make_counting_iterator(idx_count),
KeyFetcher<K, I>(thrust::raw_pointer_cast(d_oe_col_buffer),
d_src_buffer + oe_offset,
oe_stride,
thrust::raw_pointer_cast(d_idx_first)));
}
template <class K, class I>
void sort_indices_by_key(thrust::device_ptr<I> d_idx_first,
const size_t idx_count,
const thrust::device_ptr<K>& d_key_buffer,
const bool desc,
ThrustAllocator& allocator) {
if (desc) {
thrust::sort_by_key(thrust::device(allocator),
d_key_buffer,
d_key_buffer + idx_count,
d_idx_first,
thrust::greater<K>());
} else {
thrust::sort_by_key(
thrust::device(allocator), d_key_buffer, d_key_buffer + idx_count, d_idx_first);
}
}
template <class I = int32_t>
void do_radix_sort(thrust::device_ptr<I> d_idx_first,
const size_t idx_count,
const int8_t* d_src_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
ThrustAllocator& allocator) {
const auto& oe_type = layout.oe_target_info.sql_type;
if (oe_type.is_fp()) {
switch (layout.col_bytes) {
case 4: {
auto d_oe_buffer = get_device_ptr<float>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
case 8: {
auto d_oe_buffer = get_device_ptr<double>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
default:
CHECK(false);
}
return;
}
CHECK(oe_type.is_number() || oe_type.is_time());
switch (layout.col_bytes) {
case 4: {
auto d_oe_buffer = get_device_ptr<int32_t>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
case 8: {
auto d_oe_buffer = get_device_ptr<int64_t>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes);
sort_indices_by_key(d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator);
break;
}
default:
CHECK(false);
}
}
template <class I>
struct RowFetcher {
RowFetcher(int8_t* out_base,
const int8_t* in_base,
const I* indices,
const size_t row_sz)
: dst_base(out_base), src_base(in_base), idx_base(indices), row_size(row_sz) {}
__host__ __device__ void operator()(const I index) {
memcpy(dst_base + index * row_size, src_base + idx_base[index] * row_size, row_size);
}
int8_t* dst_base;
const int8_t* src_base;
const I* idx_base;
const size_t row_size;
};
template <typename DerivedPolicy>
void reset_keys_in_row_buffer(
const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
int8_t* row_buffer,
const size_t key_width,
const size_t row_size,
const size_t first,
const size_t last) {
switch (key_width) {
case 4:
thrust::for_each(
exec,
thrust::make_counting_iterator(first),
thrust::make_counting_iterator(last),
KeyReseter<int32_t>(row_buffer, row_size, static_cast<int32_t>(EMPTY_KEY_32)));
break;
case 8:
thrust::for_each(
exec,
thrust::make_counting_iterator(first),
thrust::make_counting_iterator(last),
KeyReseter<int64_t>(row_buffer, row_size, static_cast<int64_t>(EMPTY_KEY_64)));
break;
default:
CHECK(false);
}
}
std::vector<int8_t> pop_n_rows_from_merged_heaps_gpu(
Data_Namespace::DataMgr* data_mgr,
const int64_t* dev_heaps,
const size_t heaps_size,
const size_t n,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t group_key_bytes,
const size_t thread_count,
const int device_id) {
const auto row_size = layout.row_bytes;
CHECK_EQ(heaps_size, streaming_top_n::get_heap_size(row_size, n, thread_count));
const int8_t* rows_ptr = reinterpret_cast<const int8_t*>(dev_heaps) +
streaming_top_n::get_rows_offset_of_heaps(n, thread_count);
const auto total_entry_count = n * thread_count;
ThrustAllocator thrust_allocator(data_mgr, device_id);
auto d_indices = get_device_ptr<int32_t>(total_entry_count, thrust_allocator);
thrust::sequence(d_indices, d_indices + total_entry_count);
auto separator = (group_key_bytes == 4)
? thrust::partition(d_indices,
d_indices + total_entry_count,
is_taken_entry<int32_t>(rows_ptr, row_size))
: thrust::partition(d_indices,
d_indices + total_entry_count,
is_taken_entry<int64_t>(rows_ptr, row_size));
const size_t actual_entry_count = separator - d_indices;
if (!actual_entry_count) {
std::vector<int8_t> top_rows(row_size * n);
reset_keys_in_row_buffer(
thrust::host, &top_rows[0], layout.col_bytes, row_size, 0, n);
return top_rows;
}
const auto& oe_type = layout.oe_target_info.sql_type;
if (oe_type.get_notnull()) {
do_radix_sort(d_indices, actual_entry_count, rows_ptr, oe, layout, thrust_allocator);
} else {
auto separator = partition_by_null(d_indices,
d_indices + actual_entry_count,
null_val_bit_pattern(oe_type, false),
oe.nulls_first,
rows_ptr,
layout);
if (oe.nulls_first) {
const size_t null_count = separator - d_indices;
if (null_count < actual_entry_count) {
do_radix_sort(separator,
actual_entry_count - null_count,
rows_ptr,
oe,
layout,
thrust_allocator);
}
} else {
const size_t nonnull_count = separator - d_indices;
if (nonnull_count > 0) {
do_radix_sort(d_indices, nonnull_count, rows_ptr, oe, layout, thrust_allocator);
}
}
}
const auto final_entry_count = std::min(n, actual_entry_count);
auto d_top_rows = get_device_ptr<int8_t>(row_size * n, thrust_allocator);
thrust::for_each(thrust::make_counting_iterator(size_t(0)),
thrust::make_counting_iterator(final_entry_count),
RowFetcher<int32_t>(thrust::raw_pointer_cast(d_top_rows),
rows_ptr,
thrust::raw_pointer_cast(d_indices),
row_size));
if (final_entry_count < n) {
reset_keys_in_row_buffer(thrust::device,
thrust::raw_pointer_cast(d_top_rows),
layout.col_bytes,
row_size,
final_entry_count,
n);
}
std::vector<int8_t> top_rows(row_size * n);
thrust::copy(d_top_rows, d_top_rows + row_size * n, top_rows.begin());
return top_rows;
}
|
a7ee507fe14bbd54e9cc9b4a1137292bb9233127.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip/cub.cuh>
#include <ATen/hip/HIPConfig.h>
namespace at {
namespace cuda {
namespace cub {
namespace detail {
template<typename key_t, int value_size>
void radix_sort_pairs_impl(
const key_t *keys_in, key_t *keys_out,
const OpaqueType<value_size> *values_in, OpaqueType<value_size> *values_out,
int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {
TORCH_CHECK(n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
auto allocator = c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
c10::DataPtr keys_out_owner;
if (keys_out == nullptr) {
keys_out_owner = allocator->allocate(n * sizeof(key_t));
keys_out = reinterpret_cast<key_t *>(keys_out_owner.get());
}
const key_t_ *keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_ *keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::hipcub::DeviceRadixSort::SortPairsDescending,
keys_in_, keys_out_, values_in, values_out, n,
begin_bit, end_bit, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA());
} else {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::hipcub::DeviceRadixSort::SortPairs,
keys_in_, keys_out_, values_in, values_out, n,
begin_bit, end_bit, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA());
}
}
#define AT_INSTANTIATE_SORT_PAIRS(key_t, value_size) \
template void radix_sort_pairs_impl( \
const key_t *keys_in, key_t *keys_out, \
const OpaqueType<value_size> *values_in, \
OpaqueType<value_size> *values_out, \
int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);
AT_INSTANTIATE_SORT_PAIRS(int32_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 4)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 4)
#define AT_INSTANTIATE_SORT_PAIRS_8(scalar_t, ScalarType) \
AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)
// BFloat16 Radix sort is supported from ROCm 4.5 onwards
#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)
AT_INSTANTIATE_SORT_PAIRS(c10::BFloat16, 8)
#endif
} // namespace detail
template<typename key_t>
void radix_sort_keys(
const key_t *keys_in, key_t *keys_out,
int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {
TORCH_CHECK(n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
const key_t_ *keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_ *keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::hipcub::DeviceRadixSort::SortKeysDescending,
keys_in_, keys_out_, n,
begin_bit, end_bit, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA());
} else {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::hipcub::DeviceRadixSort::SortKeys,
keys_in_, keys_out_, n,
begin_bit, end_bit, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA());
}
}
// Device-wide unique-consecutive: copies `input` to `output`, dropping runs
// of consecutive duplicates (cub DeviceSelect::Unique semantics — input must
// be sorted for a global unique).  The surviving element count is written to
// *num_selected_out, which is a device pointer.
template<typename scalar_t>
void unique(const scalar_t *input, scalar_t *output, int64_t *num_selected_out, int64_t num_items) {
  // cub sizes are 32-bit internally, so reject oversized inputs up front.
  TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
    "cub unique does not support more than INT_MAX elements");
  CUB_WRAPPER(NO_ROCM(at_cuda_detail)::hipcub::DeviceSelect::Unique,
    input, output, num_selected_out, num_items, at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
}
template <typename scalar_t>
void run_length_encode(const scalar_t *input, scalar_t *output, int64_t *counts_out,
int64_t *length_out, int64_t num_items) {
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
"cub run_length_encode does not support more than INT_MAX elements");
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::hipcub::DeviceRunLengthEncode::Encode,
input, output, counts_out, length_out, num_items,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
}
#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \
template void radix_sort_keys( \
const scalar_t *keys_in, scalar_t *keys_out, int64_t n, \
bool descending, int64_t begin_bit, int64_t end_bit); \
template void unique( \
const scalar_t *input, scalar_t *output, \
int64_t *num_selected_out, int64_t num_items); \
template void run_length_encode( \
const scalar_t *input, scalar_t *output, int64_t *counts_out, \
int64_t *length_out, int64_t n);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
namespace {
// Binary addition functor usable from device code; passed to exclusive_scan
// by exclusive_sum_in_common_type below.
template <typename scalar_t>
struct SumOp {
  __device__ scalar_t operator () (scalar_t a, scalar_t b) const {
    return a + b;
  }
};
}
// Inclusive prefix sum over `num_items` elements.  Named "truncating" because
// the accumulation happens in cub's Sum over the iterator value types —
// presumably narrowing stores truncate; actual numeric behavior comes from
// the inclusive_scan helper (TODO confirm against its definition).
template <typename input_t, typename output_t>
void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t num_items) {
  using NO_ROCM(at_cuda_detail)::hipcub::Sum;
  inclusive_scan(input, output, Sum{}, num_items);
}
template void inclusive_sum_truncating(const int32_t *input, int32_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int64_t *input, int64_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int32_t *input, int64_t *output, int64_t num_items);
// Exclusive prefix sum computed in std::common_type_t<input_t, output_t>, so
// narrow inputs (bool, uint8) with a wide output accumulate in the wide type
// — see the bool/uint8 -> int64 instantiations below.
template <typename input_t, typename output_t>
void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t num_items) {
  using scalar_t = std::common_type_t<input_t, output_t>;
  exclusive_scan(input, output, SumOp<scalar_t>{}, scalar_t(0), num_items);
}
template void exclusive_sum_in_common_type(const int32_t *input, int32_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const int64_t *input, int64_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const bool *input, int64_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const uint8_t *input, int64_t *output, int64_t num_items);
}}} // namespace at::cuda::cub
| a7ee507fe14bbd54e9cc9b4a1137292bb9233127.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/CUDAConfig.h>
namespace at {
namespace cuda {
namespace cub {
namespace detail {
template<typename key_t, int value_size>
void radix_sort_pairs_impl(
const key_t *keys_in, key_t *keys_out,
const OpaqueType<value_size> *values_in, OpaqueType<value_size> *values_out,
int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {
TORCH_CHECK(n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
auto allocator = c10::cuda::CUDACachingAllocator::get();
c10::DataPtr keys_out_owner;
if (keys_out == nullptr) {
keys_out_owner = allocator->allocate(n * sizeof(key_t));
keys_out = reinterpret_cast<key_t *>(keys_out_owner.get());
}
const key_t_ *keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_ *keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending,
keys_in_, keys_out_, values_in, values_out, n,
begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
} else {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs,
keys_in_, keys_out_, values_in, values_out, n,
begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
}
}
#define AT_INSTANTIATE_SORT_PAIRS(key_t, value_size) \
template void radix_sort_pairs_impl( \
const key_t *keys_in, key_t *keys_out, \
const OpaqueType<value_size> *values_in, \
OpaqueType<value_size> *values_out, \
int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);
AT_INSTANTIATE_SORT_PAIRS(int32_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 4)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 4)
#define AT_INSTANTIATE_SORT_PAIRS_8(scalar_t, ScalarType) \
AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)
// BFloat16 Radix sort is supported from ROCm 4.5 onwards
#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)
AT_INSTANTIATE_SORT_PAIRS(c10::BFloat16, 8)
#endif
} // namespace detail
template<typename key_t>
void radix_sort_keys(
const key_t *keys_in, key_t *keys_out,
int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {
TORCH_CHECK(n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
const key_t_ *keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_ *keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending,
keys_in_, keys_out_, n,
begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
} else {
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys,
keys_in_, keys_out_, n,
begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
}
}
template<typename scalar_t>
void unique(const scalar_t *input, scalar_t *output, int64_t *num_selected_out, int64_t num_items) {
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
"cub unique does not support more than INT_MAX elements");
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique,
input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream());
}
template <typename scalar_t>
void run_length_encode(const scalar_t *input, scalar_t *output, int64_t *counts_out,
int64_t *length_out, int64_t num_items) {
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
"cub run_length_encode does not support more than INT_MAX elements");
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode,
input, output, counts_out, length_out, num_items,
at::cuda::getCurrentCUDAStream());
}
#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \
template void radix_sort_keys( \
const scalar_t *keys_in, scalar_t *keys_out, int64_t n, \
bool descending, int64_t begin_bit, int64_t end_bit); \
template void unique( \
const scalar_t *input, scalar_t *output, \
int64_t *num_selected_out, int64_t num_items); \
template void run_length_encode( \
const scalar_t *input, scalar_t *output, int64_t *counts_out, \
int64_t *length_out, int64_t n);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
namespace {
template <typename scalar_t>
struct SumOp {
__device__ scalar_t operator () (scalar_t a, scalar_t b) const {
return a + b;
}
};
}
template <typename input_t, typename output_t>
void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t num_items) {
using NO_ROCM(at_cuda_detail)::cub::Sum;
inclusive_scan(input, output, Sum{}, num_items);
}
template void inclusive_sum_truncating(const int32_t *input, int32_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int64_t *input, int64_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int32_t *input, int64_t *output, int64_t num_items);
template <typename input_t, typename output_t>
void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t num_items) {
using scalar_t = std::common_type_t<input_t, output_t>;
exclusive_scan(input, output, SumOp<scalar_t>{}, scalar_t(0), num_items);
}
template void exclusive_sum_in_common_type(const int32_t *input, int32_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const int64_t *input, int64_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const bool *input, int64_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const uint8_t *input, int64_t *output, int64_t num_items);
}}} // namespace at::cuda::cub
|
d7e22f3b3f9e31c0f5bd8bc28f697d27884f8894.hip | // !!! This is a file automatically generated by hipify!!!
#include "Sphere.h"
#include "Material.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define PI (3.1415927f)
// Device-side construction trampoline: exactly one thread of the launch
// news up a Sphere in device heap memory and stores it through this_d.
// Presumably this exists so the object's vtable pointer is a device pointer
// (the usual CUDA polymorphism pattern) — TODO confirm against Hittable usage.
__global__ void constructEnvironmentGPU_Sphere(Hittable** this_d, Vec3 center, float radius, Material** mat_d)
{
// Guard so only the first thread allocates, whatever the launch shape.
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
(*this_d) = new Sphere(center, radius, mat_d);
}
}
// Device-side destruction trampoline: exactly one thread deletes the
// device-heap object created by constructEnvironmentGPU_Sphere.
__global__ void destroyEnvironmentGPU_Sphere(Hittable** this_d)
{
// Guard so only the first thread deletes, whatever the launch shape.
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
delete (*this_d);
}
}
// Host-facing constructor.  Caches both the host Material pointer and its
// device-side counterpart (via mat->GetPtrGPU()).  The device mirror of this
// Sphere is created only in the host compilation pass (__CUDA_ARCH__ unset).
Sphere::Sphere(Vec3 center, float radius, Material* mat) : center(center), radius(radius), mat(mat), mat_d(mat != nullptr ? mat->GetPtrGPU() : nullptr)
{
#ifndef __CUDA_ARCH__
constructEnvironment();
#endif
}
__device__ Sphere::Sphere(Vec3 center, float radius, Material** mat_d) : center(center), radius(radius), mat_d(mat_d)
{
}
Sphere::~Sphere()
{
#ifndef __CUDA_ARCH__
destroyEnvironment();
#endif
}
// Ray-sphere intersection.  Solves the quadratic |o + t*dir - c|^2 = r^2
// using the half-b form (assumes ray.Direction() has the scale the rest of
// the tracer expects — TODO confirm it is normalized).
// On a hit, fills hRec with t, point, (unnormalized) outward normal,
// spherical UV, and the material, and returns true; otherwise returns false.
bool Sphere::Hit(const Ray3& ray, float tMin, float tMax, HitRecord& hRec) const
{
	Vec3 oc = ray.Origin() - center;
	float b = Vec3::Dot(oc, ray.Direction());
	float c = oc.LengthSquared() - radius * radius;
	float discriminant = b * b - c;
	if (discriminant > 0.0f)
	{
		// Hoisted: the original computed sqrt(discriminant) twice.
		float sqrtD = sqrt(discriminant);
		// Try the near root first, then the far root; the first one inside
		// (tMin, tMax) wins.  This replaces two duplicated copies of the
		// record-filling code.
		for (int root = 0; root < 2; ++root)
		{
			float temp = (root == 0) ? (-b - sqrtD) : (-b + sqrtD);
			if (temp > tMin && temp < tMax)
			{
				hRec.SetT(temp);
				hRec.SetPoint(ray.PointAt(temp));
				hRec.SetNormal(hRec.GetPoint() - center);
				// Spherical UV mapping; note it uses the raw hit-point
				// coordinates, not a unit normal (matches original behavior).
				hRec.SetU(1.0f - (atan2(hRec.GetPoint().Z, hRec.GetPoint().X) + PI) / (2.0f * PI));
				hRec.SetV((asin(hRec.GetPoint().Y) + PI / 2.0f) / PI);
#ifdef __CUDA_ARCH__
				// Device pass: hand out the device-side material pointer.
				hRec.SetMaterial(mat_d);
#else
				hRec.SetMaterialHost(mat);
#endif
				return true;
			}
		}
	}
	return false;
}
// Allocates a device slot for the polymorphic pointer and launches a
// single-thread kernel that constructs the Sphere on the device.
void Sphere::constructEnvironment()
{
// NOTE(review): sizeof(Hittable**) happens to equal sizeof(Hittable*) (both
// are pointer-sized) so this works, but the slot holds a Hittable* and the
// intent would read better as sizeof(Hittable*).  No HIP return codes are
// checked anywhere in this path.
hipMalloc(&this_d, sizeof(Hittable**));
hipLaunchKernelGGL(( constructEnvironmentGPU_Sphere), dim3(1), dim3(1), 0, 0, this_d, center, radius, mat_d);
// Block until the device-side construction has finished.
hipDeviceSynchronize();
}
void Sphere::destroyEnvironment()
{
hipLaunchKernelGGL(( destroyEnvironmentGPU_Sphere), dim3(1), dim3(1), 0, 0, this_d);
hipFree(this_d);
hipDeviceSynchronize();
} | d7e22f3b3f9e31c0f5bd8bc28f697d27884f8894.cu | #include "Sphere.h"
#include "Material.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define PI (3.1415927f)
__global__ void constructEnvironmentGPU_Sphere(Hittable** this_d, Vec3 center, float radius, Material** mat_d)
{
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
(*this_d) = new Sphere(center, radius, mat_d);
}
}
__global__ void destroyEnvironmentGPU_Sphere(Hittable** this_d)
{
if (blockIdx.x * blockDim.x + threadIdx.x == 0)
{
delete (*this_d);
}
}
Sphere::Sphere(Vec3 center, float radius, Material* mat) : center(center), radius(radius), mat(mat), mat_d(mat != nullptr ? mat->GetPtrGPU() : nullptr)
{
#ifndef __CUDA_ARCH__
constructEnvironment();
#endif
}
__device__ Sphere::Sphere(Vec3 center, float radius, Material** mat_d) : center(center), radius(radius), mat_d(mat_d)
{
}
Sphere::~Sphere()
{
#ifndef __CUDA_ARCH__
destroyEnvironment();
#endif
}
// Ray-sphere intersection.  Solves the quadratic |o + t*dir - c|^2 = r^2
// using the half-b form (assumes ray.Direction() has the scale the rest of
// the tracer expects — TODO confirm it is normalized).
// On a hit, fills hRec with t, point, (unnormalized) outward normal,
// spherical UV, and the material, and returns true; otherwise returns false.
bool Sphere::Hit(const Ray3& ray, float tMin, float tMax, HitRecord& hRec) const
{
	Vec3 oc = ray.Origin() - center;
	float b = Vec3::Dot(oc, ray.Direction());
	float c = oc.LengthSquared() - radius * radius;
	float discriminant = b * b - c;
	if (discriminant > 0.0f)
	{
		// Hoisted: the original computed sqrt(discriminant) twice.
		float sqrtD = sqrt(discriminant);
		// Try the near root first, then the far root; the first one inside
		// (tMin, tMax) wins.  This replaces two duplicated copies of the
		// record-filling code.
		for (int root = 0; root < 2; ++root)
		{
			float temp = (root == 0) ? (-b - sqrtD) : (-b + sqrtD);
			if (temp > tMin && temp < tMax)
			{
				hRec.SetT(temp);
				hRec.SetPoint(ray.PointAt(temp));
				hRec.SetNormal(hRec.GetPoint() - center);
				// Spherical UV mapping; note it uses the raw hit-point
				// coordinates, not a unit normal (matches original behavior).
				hRec.SetU(1.0f - (atan2(hRec.GetPoint().Z, hRec.GetPoint().X) + PI) / (2.0f * PI));
				hRec.SetV((asin(hRec.GetPoint().Y) + PI / 2.0f) / PI);
#ifdef __CUDA_ARCH__
				// Device pass: hand out the device-side material pointer.
				hRec.SetMaterial(mat_d);
#else
				hRec.SetMaterialHost(mat);
#endif
				return true;
			}
		}
	}
	return false;
}
void Sphere::constructEnvironment()
{
cudaMalloc(&this_d, sizeof(Hittable**));
constructEnvironmentGPU_Sphere<<<1, 1>>>(this_d, center, radius, mat_d);
cudaDeviceSynchronize();
}
void Sphere::destroyEnvironment()
{
destroyEnvironmentGPU_Sphere<<<1, 1>>>(this_d);
cudaFree(this_d);
cudaDeviceSynchronize();
} |
5bf4dca6295fbd3dba742518b2ff3ce5bf90f4d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void calc_linear_kernel_predict(int objs,int coords,double* x,int objs_train,double* x_train,double* out){
int id=blockDim.x * blockIdx.x + threadIdx.x;
int i=id/objs;
int j=id%objs;
if (i<objs_train){
double r=1.0;
for (int k=0;k<coords;k++){
r += x_train[coords*i+k] * x[coords*j+k];
}
out[id]=r;
}
} | 5bf4dca6295fbd3dba742518b2ff3ce5bf90f4d7.cu | #include "includes.h"
__global__ static void calc_linear_kernel_predict(int objs,int coords,double* x,int objs_train,double* x_train,double* out){
int id=blockDim.x * blockIdx.x + threadIdx.x;
int i=id/objs;
int j=id%objs;
if (i<objs_train){
double r=1.0;
for (int k=0;k<coords;k++){
r += x_train[coords*i+k] * x[coords*j+k];
}
out[id]=r;
}
} |
b28af472e2735d918bc96201e03f560e33707bc5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Tree-reduces one block's worth of ints into a single partial sum stored in
// g_odata[blockIdx.x].
// Contract: exactly blockDim.x elements per block and NO tail guard — the
// launch must cover the input exactly (main sizes the grid that way and the
// element count is assumed a power of two — TODO confirm for other callers).
// Dynamic shared memory: blockDim.x * sizeof(int).
__global__ void reduce(int *g_idata, int *g_odata)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Stage this block's slice of the input into shared memory.
sdata[tid] = g_idata[i];
__syncthreads();
// Interleaved-index reduction: the stride doubles each pass and thread tid
// folds sdata[index + s] into sdata[index], index = 2*s*tid.
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
// Barrier is outside the divergent branch, so all threads reach it.
__syncthreads();
}
// Thread 0 publishes the block's partial sum.
if (tid == 0)
{
g_odata[blockIdx.x] = sdata[0];
}
}
int main(int argc, char *argv[])
{
// We assume that the element number is the power of 2 for simplification.
const int elemNum = 1 << 22;
int arraySize = elemNum * sizeof(int);
// host memory
int *h_idata;
int sum;
// device memory
int *d_idata;
int *d_odata;
// initialize input data
h_idata = (int *) malloc(arraySize);
FILE *fp;
if((fp = fopen(argv[1], "rb")) == NULL)
{
printf("Can not open input file!\n");
exit(0);
}
for (int i = 0; i < elemNum; ++i)
{
fscanf(fp, "%d", &h_idata[i]);
}
fclose(fp);
// copy input data from CPU to GPU
hipMalloc((void **) &d_idata, arraySize);
hipMemcpy(d_idata, h_idata, arraySize, hipMemcpyHostToDevice);
int threadNum = 0;
int blockNum = 0;
// calculate the threadNum and blockNum for the first kernel
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, 0);
int maxThreadsPerBlock = deviceProperties.maxThreadsPerBlock; // maxThreadsPerBlock = 1024 on K20X
threadNum = (elemNum > maxThreadsPerBlock)? maxThreadsPerBlock: elemNum;
blockNum = (int) ceil((double) elemNum / threadNum); // blockNum = 4096
// the number of output elements of the first kernel is blockNum
hipMalloc((void **) &d_odata, blockNum * sizeof(int));
// use GPU of id=0
hipSetDevice(0);
// parameters for the first kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
int sMemSize = threadNum * sizeof(int);
hipEvent_t start, stop;
float stepTime;
float totalTime = 0;
// create event for recording GPU execution time
hipEventCreate(&start);
hipEventCreate(&stop);
// execute the first kernel and set the GPU timer
hipEventRecord(start, 0);
hipLaunchKernelGGL(( reduce), dim3(gridDim), dim3(blockDim), sMemSize, 0, d_idata, d_odata);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculate the execution time of the first kernel
hipEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
hipEventDestroy(start);
hipEventDestroy(stop);
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
while(blockNum >= 1) {
// parameters for the current kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
sMemSize = threadNum * sizeof(int);
// create event for recording GPU execution time
hipEventCreate(&start);
hipEventCreate(&stop);
// execute the current kernel and set the GPU timer
hipEventRecord(start, 0);
hipLaunchKernelGGL(( reduce), dim3(gridDim), dim3(blockDim), sMemSize, 0, d_odata, d_odata);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculate the execution time of the current kernel
hipEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
hipEventDestroy(start);
hipEventDestroy(stop);
if (blockNum == 1) break;
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
}
// copy result back to CPU
hipMemcpy(&sum, d_odata, sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_idata);
hipFree(d_odata);
float bandwidth = elemNum * sizeof(int) / (totalTime / 1000) / 1024 / 1024 / 1024;
printf("%d %fms %fGB/s\n", sum, totalTime, bandwidth);
return 0;
}
| b28af472e2735d918bc96201e03f560e33707bc5.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Tree-reduces one block's worth of ints into a single partial sum stored in
// g_odata[blockIdx.x].
// Contract: exactly blockDim.x elements per block and NO tail guard — the
// launch must cover the input exactly (main sizes the grid that way and the
// element count is assumed a power of two — TODO confirm for other callers).
// Dynamic shared memory: blockDim.x * sizeof(int).
__global__ void reduce(int *g_idata, int *g_odata)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Stage this block's slice of the input into shared memory.
sdata[tid] = g_idata[i];
__syncthreads();
// Interleaved-index reduction: the stride doubles each pass and thread tid
// folds sdata[index + s] into sdata[index], index = 2*s*tid.
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
// Barrier is outside the divergent branch, so all threads reach it.
__syncthreads();
}
// Thread 0 publishes the block's partial sum.
if (tid == 0)
{
g_odata[blockIdx.x] = sdata[0];
}
}
int main(int argc, char *argv[])
{
// We assume that the element number is the power of 2 for simplification.
const int elemNum = 1 << 22;
int arraySize = elemNum * sizeof(int);
// host memory
int *h_idata;
int sum;
// device memory
int *d_idata;
int *d_odata;
// initialize input data
h_idata = (int *) malloc(arraySize);
FILE *fp;
if((fp = fopen(argv[1], "rb")) == NULL)
{
printf("Can not open input file!\n");
exit(0);
}
for (int i = 0; i < elemNum; ++i)
{
fscanf(fp, "%d", &h_idata[i]);
}
fclose(fp);
// copy input data from CPU to GPU
cudaMalloc((void **) &d_idata, arraySize);
cudaMemcpy(d_idata, h_idata, arraySize, cudaMemcpyHostToDevice);
int threadNum = 0;
int blockNum = 0;
// calculate the threadNum and blockNum for the first kernel
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, 0);
int maxThreadsPerBlock = deviceProperties.maxThreadsPerBlock; // maxThreadsPerBlock = 1024 on K20X
threadNum = (elemNum > maxThreadsPerBlock)? maxThreadsPerBlock: elemNum;
blockNum = (int) ceil((double) elemNum / threadNum); // blockNum = 4096
// the number of output elements of the first kernel is blockNum
cudaMalloc((void **) &d_odata, blockNum * sizeof(int));
// use GPU of id=0
cudaSetDevice(0);
// parameters for the first kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
int sMemSize = threadNum * sizeof(int);
cudaEvent_t start, stop;
float stepTime;
float totalTime = 0;
// create event for recording GPU execution time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// execute the first kernel and set the GPU timer
cudaEventRecord(start, 0);
reduce<<<gridDim, blockDim, sMemSize>>>(d_idata, d_odata);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculate the execution time of the first kernel
cudaEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
cudaEventDestroy(start);
cudaEventDestroy(stop);
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
while(blockNum >= 1) {
// parameters for the current kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
sMemSize = threadNum * sizeof(int);
// create event for recording GPU execution time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// execute the current kernel and set the GPU timer
cudaEventRecord(start, 0);
reduce<<<gridDim, blockDim, sMemSize>>>(d_odata, d_odata);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculate the execution time of the current kernel
cudaEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
cudaEventDestroy(start);
cudaEventDestroy(stop);
if (blockNum == 1) break;
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
}
// copy result back to CPU
cudaMemcpy(&sum, d_odata, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_idata);
cudaFree(d_odata);
float bandwidth = elemNum * sizeof(int) / (totalTime / 1000) / 1024 / 1024 / 1024;
printf("%d %fms %fGB/s\n", sum, totalTime, bandwidth);
return 0;
}
|
939d59416b15f13ea10b35675fb9fdf332c691ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <thrust/sort.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 4 5 2 3 6]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
//#define USE_THRUST
// Debug helper: each thread prints one element of d_out.  Launch as
// <<<1, N>>>; device printf output across threads is unordered.
__global__ void print_kernel(unsigned int *d_out)
{
// NOTE(review): %d with an unsigned int relies on the value fitting in int.
printf("%d ", d_out[threadIdx.x]);
}
// One-bit histogram: d_out[0] counts inputs whose bit `shift` is clear,
// d_out[1] counts inputs whose bit is set.  d_out must be zeroed beforehand.
__global__ void histo_kernel(unsigned int * d_out, unsigned int* const d_in,
unsigned int shift, const unsigned int numElems)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numElems)
{
// Extract the bit directly: (value >> shift) & 1 is 0 or 1.
const unsigned int bin = (d_in[tid] >> shift) & 1u;
atomicAdd(&d_out[bin], 1);
}
}
// Blelloch Scan - described in lecture
// Exclusive prefix sum over d_in, in place.  Intended launch: a single block
// of numBins threads (numBins is tiny: 1 << numBits) with
// numBins * sizeof(unsigned int) dynamic shared memory.
__global__ void sumscan_kernel(unsigned int * d_in, const size_t numBins, const unsigned int numElems)
{
int myId = threadIdx.x;
// Fix: the original `if (myId >= numElems) return;` bailed out before the
// barriers below — a divergent __syncthreads() hazard whenever some threads
// exit early.  Guard the memory accesses instead so every thread reaches
// every barrier.
bool active = (myId < (int)numBins) && ((unsigned int)myId < numElems);
// Fix: counts are unsigned int; the original staged them through a float
// shared array (exact only up to 2^24, and needlessly converting).
extern __shared__ unsigned int sdata[];
if (active) sdata[myId] = d_in[myId];
__syncthreads();
// Hillis-Steele inclusive scan.  Read the partner value into a register,
// barrier, then write: the original's `sdata[myId] += sdata[myId - d]`
// races with the thread concurrently updating sdata[myId - d].
for (int d = 1; d < numBins; d *= 2) {
unsigned int addend = 0;
if (active && myId >= d) addend = sdata[myId - d];
__syncthreads();
if (active) sdata[myId] += addend;
__syncthreads();
}
// Shift right by one to turn the inclusive scan into an exclusive one.
if (active) {
d_in[myId] = (myId == 0) ? 0 : sdata[myId - 1];
}
}
// Emits a 0/1 predicate per element: 1 when bit `shift` of the input is
// clear (element belongs to the "zeros" partition), 0 when it is set.
__global__ void makescan_kernel(unsigned int * d_in, unsigned int *d_scan,
unsigned int shift, const unsigned int numElems)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= numElems) return;
// (value >> shift) & 1 isolates the bit; invert it to get the predicate.
d_scan[tid] = ((d_in[tid] >> shift) & 1u) ? 0 : 1;
}
// Scatter pass of the 1-bit radix sort: moves each (value, position) pair to
// its final slot for the current bit.
//   d_histogram: exclusive-scanned 2-bin histogram, so d_histogram[1] is the
//                total number of elements whose bit is 0.
//   d_scaned:    exclusive prefix sum of the "bit is 0" predicate, i.e. how
//                many zero-bit elements precede each index.
__global__ void move_kernel(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const unsigned int numElems,
unsigned int* const d_histogram,
unsigned int* const d_scaned,
unsigned int shift)
{
unsigned int mask = 1 << shift;
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numElems) return;
// Important!
// Algorithm described in 7.4 of http://wykvictor.github.io/2016/04/03/Cuda-2.html
int des_id = 0;
if ((d_inputVals[myId] & mask) >> shift) {
// Bit set: place after all zero-bit elements.  myId - d_scaned[myId] is
// the count of set-bit elements before this one.
des_id = myId + d_histogram[1] - d_scaned[myId];
} else {
// Bit clear: the scan value is directly the destination index.
des_id = d_scaned[myId];
}
d_outputVals[des_id] = d_inputVals[myId];
d_outputPos[des_id] = d_inputPos[myId];
}
#ifdef USE_THRUST
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
// Thrust vectors wrapping raw GPU data
thrust::device_ptr<unsigned int> d_inputVals_p(d_inputVals);
thrust::device_ptr<unsigned int> d_inputPos_p(d_inputPos);
thrust::host_vector<unsigned int> h_inputVals_vec(d_inputVals_p,
d_inputVals_p + numElems);
thrust::host_vector<unsigned int> h_inputPos_vec(d_inputPos_p,
d_inputPos_p + numElems);
// ?? device_vector is wrong
thrust::sort_by_key(h_inputVals_vec.begin(), h_inputVals_vec.end(), h_inputPos_vec.begin());
checkCudaErrors(hipMemcpy(d_outputVals, thrust::raw_pointer_cast(&h_inputVals_vec[0]),
numElems * sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_outputPos, thrust::raw_pointer_cast(&h_inputPos_vec[0]),
numElems * sizeof(unsigned int), hipMemcpyHostToDevice));
}
#else
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
// use how many bits/time to compare(maybe 4 is most efficent)
const int numBits = 1; //??
const int numBins = 1 << numBits;
const int m = 1 << 10;
int blocks = ceil((float)numElems / m);
printf("m %d blocks %d\n", m ,blocks);
// allocate GPU memory
unsigned int *d_binHistogram;
checkCudaErrors(hipMalloc(&d_binHistogram, sizeof(unsigned int)* numBins));
// not numBins --> different from CPU version
thrust::device_vector<unsigned int> d_scan(numElems);
// Loop bits: only guaranteed to work for numBits that are multiples of 2
for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i++) {
//unsigned int mask = 1 << i;
checkCudaErrors(hipMemset(d_binHistogram, 0, sizeof(unsigned int)* numBins));
// 1) perform histogram of data & mask into bins
histo_kernel << <blocks, m >> >(d_binHistogram, d_inputVals, i, numElems);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
//print_kernel << <1, 2 >> >(d_binHistogram);
//printf("\n");
// 2) perform exclusive prefix sum (scan) on binHistogram to get starting
// location for each bin
sumscan_kernel << <1, numBins, sizeof(unsigned int)* numBins>> >(d_binHistogram, numBins, numElems);
//print_kernel << <1, 2 >> >(d_binHistogram);
//printf("\n");
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// 3) Gather everything into the correct location
// need to move vals and positions
makescan_kernel << <blocks, m >> >(d_inputVals, thrust::raw_pointer_cast(&d_scan[0]), i, numElems);
//print_kernel << <1, 4 >> >(thrust::raw_pointer_cast(&d_scan[0]));
//printf("\n");
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// segmented scan described in http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
//thrust::host_vector<unsigned int> h_scan = d_scan;
//printf("%d %d %d\n", h_scan[0], h_scan[1], h_scan[2]);
thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin());
//print_kernel << <1, 4 >> >(thrust::raw_pointer_cast(&d_scan[0]));
// printf("\n");
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
//thrust::host_vector<unsigned int> h_scan_2 = d_scan;
//printf("%d %d %d\n", h_scan_2[0], h_scan_2[1], h_scan_2[2]);
move_kernel << <blocks, m >> >(d_inputVals, d_inputPos, d_outputVals, d_outputPos,
numElems, d_binHistogram, thrust::raw_pointer_cast(&d_scan[0]), i);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(d_inputVals, d_outputVals, numElems * sizeof(unsigned int), hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_inputPos, d_outputPos, numElems * sizeof(unsigned int), hipMemcpyDeviceToDevice));
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
// Free memory
checkCudaErrors(hipFree(d_binHistogram));
}
#endif | 939d59416b15f13ea10b35675fb9fdf332c691ac.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <thrust/sort.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 4 5 2 3 6]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
//#define USE_THRUST
// Debug helper: each thread of the (single) block prints one element of
// d_out. Output order across threads is unspecified.
// Fixed: use %u — d_out holds unsigned ints, %d misprints values > INT_MAX.
__global__ void print_kernel(unsigned int *d_out)
{
    printf("%u ", d_out[threadIdx.x]);
}
// Histogram of bit `shift` over d_in[0..numElems): d_out[0] counts elements
// with the bit clear, d_out[1] counts elements with it set. d_out must be
// zero-initialised before launch.
__global__ void histo_kernel(unsigned int * d_out, unsigned int* const d_in,
    unsigned int shift, const unsigned int numElems)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numElems) return;
    const unsigned int bin = (d_in[gid] >> shift) & 1u;
    atomicAdd(&d_out[bin], 1);
}
// In-place Hillis-Steele scan over d_in[0..numBins), shifted right by one to
// yield an exclusive prefix sum. Launched with one block of numBins threads
// and numBins * sizeof(unsigned int) bytes of dynamic shared memory.
// (The original comment called this a Blelloch scan; it is Hillis-Steele.)
__global__ void sumscan_kernel(unsigned int * d_in, const size_t numBins, const unsigned int numElems)
{
    int myId = threadIdx.x;
    if (myId >= numElems) return;
    // Fixed: shared storage is unsigned int. The original declared it float,
    // which silently loses precision once counts exceed 2^24.
    extern __shared__ unsigned int scan_sdata[];
    scan_sdata[myId] = d_in[myId];
    __syncthreads(); // make sure entire block is loaded!
    for (int d = 1; d < numBins; d *= 2) {
        // Fixed: read the partner value, barrier, then write. The original
        // read and wrote scan_sdata in the same step, racing across warps.
        unsigned int addend = (myId >= d) ? scan_sdata[myId - d] : 0;
        __syncthreads();
        scan_sdata[myId] += addend;
        __syncthreads();
    }
    // Shift the inclusive scan right by one to make it exclusive.
    if (myId == 0) d_in[0] = 0;
    else d_in[myId] = scan_sdata[myId - 1];
}
// Emit a 0/1 predicate per element: 1 when bit `shift` of the input is
// clear, 0 when set. The result feeds the exclusive scan that computes
// per-element destination offsets for the radix-sort scatter.
__global__ void makescan_kernel(unsigned int * d_in, unsigned int *d_scan,
    unsigned int shift, const unsigned int numElems)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numElems) return;
    d_scan[gid] = ((d_in[gid] >> shift) & 1u) ^ 1u;
}
// Scatter each (value, position) pair into its sorted slot for the current
// bit. Zeros go to their exclusive-scan offset; ones go after all zeros
// (d_histogram[1] holds the zero count after the histogram's exclusive scan).
// Scheme described in 7.4 of http://wykvictor.github.io/2016/04/03/Cuda-2.html
__global__ void move_kernel(unsigned int* const d_inputVals,
    unsigned int* const d_inputPos,
    unsigned int* const d_outputVals,
    unsigned int* const d_outputPos,
    const unsigned int numElems,
    unsigned int* const d_histogram,
    unsigned int* const d_scaned,
    unsigned int shift)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numElems) return;
    const unsigned int val = d_inputVals[gid];
    int des_id;
    if ((val >> shift) & 1u) {
        // bit set: total zeros + number of ones before this element
        des_id = gid + d_histogram[1] - d_scaned[gid];
    } else {
        // bit clear: number of zeros before this element
        des_id = d_scaned[gid];
    }
    d_outputVals[des_id] = val;
    d_outputPos[des_id] = d_inputPos[gid];
}
#ifdef USE_THRUST
/* Reference implementation: copy the (value, position) pairs to the host,
 * sort them with thrust::sort_by_key (values are the keys, positions the
 * payload), and copy the sorted result into the output device buffers. */
void your_sort(unsigned int* const d_inputVals,
    unsigned int* const d_inputPos,
    unsigned int* const d_outputVals,
    unsigned int* const d_outputPos,
    const size_t numElems)
{
    // Thrust vectors wrapping raw GPU data
    thrust::device_ptr<unsigned int> d_inputVals_p(d_inputVals);
    thrust::device_ptr<unsigned int> d_inputPos_p(d_inputPos);
    thrust::host_vector<unsigned int> h_inputVals_vec(d_inputVals_p,
        d_inputVals_p + numElems);
    thrust::host_vector<unsigned int> h_inputPos_vec(d_inputPos_p,
        d_inputPos_p + numElems);
    // NOTE(review): this sorts on the CPU. The author's original note
    // ("?? device_vector is wrong") suggests a device_vector sort
    // misbehaved here, so the host-side sort was kept deliberately.
    thrust::sort_by_key(h_inputVals_vec.begin(), h_inputVals_vec.end(), h_inputPos_vec.begin());
    checkCudaErrors(cudaMemcpy(d_outputVals, thrust::raw_pointer_cast(&h_inputVals_vec[0]),
        numElems * sizeof(unsigned int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_outputPos, thrust::raw_pointer_cast(&h_inputPos_vec[0]),
        numElems * sizeof(unsigned int), cudaMemcpyHostToDevice));
}
#else
/* LSB radix sort of (value, position) pairs, one bit per pass.
 * Per pass: (1) histogram the current bit into 2 bins, (2) exclusive-scan
 * the histogram to get each bin's start offset, (3) build and exclusive-scan
 * the "bit is 0" predicate to get per-element offsets, (4) scatter with
 * move_kernel, then copy the result back into the input buffers so the next
 * pass (and, after the last pass, the caller's output buffers) see the
 * partially sorted data. */
void your_sort(unsigned int* const d_inputVals,
    unsigned int* const d_inputPos,
    unsigned int* const d_outputVals,
    unsigned int* const d_outputPos,
    const size_t numElems)
{
    // use how many bits/time to compare(maybe 4 is most efficent)
    const int numBits = 1;
    const int numBins = 1 << numBits;
    const int m = 1 << 10;  // threads per block
    int blocks = ceil((float)numElems / m);
    printf("m %d blocks %d\n", m ,blocks);
    // allocate GPU memory
    unsigned int *d_binHistogram;
    checkCudaErrors(cudaMalloc(&d_binHistogram, sizeof(unsigned int)* numBins));
    // per-element scan buffer (numElems entries, not numBins)
    thrust::device_vector<unsigned int> d_scan(numElems);
    // One pass per bit of the 32-bit keys.
    for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i++) {
        checkCudaErrors(cudaMemset(d_binHistogram, 0, sizeof(unsigned int)* numBins));
        // 1) histogram of the current bit
        histo_kernel << <blocks, m >> >(d_binHistogram, d_inputVals, i, numElems);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        // 2) exclusive prefix sum (scan) on binHistogram: start of each bin
        sumscan_kernel << <1, numBins, sizeof(unsigned int)* numBins>> >(d_binHistogram, numBins, numElems);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        // 3) predicate (bit clear -> 1), then exclusive scan for offsets
        makescan_kernel << <blocks, m >> >(d_inputVals, thrust::raw_pointer_cast(&d_scan[0]), i, numElems);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin());
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        // 4) scatter values and positions into their sorted slots
        move_kernel << <blocks, m >> >(d_inputVals, d_inputPos, d_outputVals, d_outputPos,
            numElems, d_binHistogram, thrust::raw_pointer_cast(&d_scan[0]), i);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        // ping-pong: next pass reads the freshly scattered data
        checkCudaErrors(cudaMemcpy(d_inputVals, d_outputVals, numElems * sizeof(unsigned int), cudaMemcpyDeviceToDevice));
        checkCudaErrors(cudaMemcpy(d_inputPos, d_outputPos, numElems * sizeof(unsigned int), cudaMemcpyDeviceToDevice));
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
    }
    // Free memory
    checkCudaErrors(cudaFree(d_binHistogram));
}
#endif |
eb012ac4e6a65d51f6e3401631a35dc4a67d3b5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Per-element cosine similarity: results[i] = y / (sqrt(x) * sqrt(z)).
// Each float3 presumably packs (x = |a|^2, y = a.b, z = |b|^2) — TODO confirm
// with the caller.
// Fixed: the loop stride was gridDim.x, which made threads of different
// blocks redundantly recompute the same elements; the proper grid-stride is
// gridDim.x * blockDim.x. Also index with size_t to match pSize.
__global__ void cosineSimilarityCuda(float3* pDotProducts, size_t pSize, float* results) {
    size_t instance = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    while (instance < pSize) {
        results[instance] = pDotProducts[instance].y / (sqrtf(pDotProducts[instance].x)* sqrtf(pDotProducts[instance].z));
        instance += stride;
    }
} | eb012ac4e6a65d51f6e3401631a35dc4a67d3b5f.cu | #include "includes.h"
// Per-element cosine similarity: results[i] = y / (sqrt(x) * sqrt(z)).
// Each float3 presumably packs (x = |a|^2, y = a.b, z = |b|^2) — TODO confirm
// with the caller.
// Fixed: the loop stride was gridDim.x, which made threads of different
// blocks redundantly recompute the same elements; the proper grid-stride is
// gridDim.x * blockDim.x. Also index with size_t to match pSize.
__global__ void cosineSimilarityCuda(float3* pDotProducts, size_t pSize, float* results) {
    size_t instance = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    while (instance < pSize) {
        results[instance] = pDotProducts[instance].y / (sqrtf(pDotProducts[instance].x)* sqrtf(pDotProducts[instance].z));
        instance += stride;
    }
} |
2bc57626765a4a4173d8d1409d2b5ff1588924b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Matrix multiply device code
#include <assert.h>
#include <math.h>
#include "utils.h"
#include "types.h"
using namespace std;
#define TW BLOCKDIM_X//32
//#define TWx 32
//#define TWy 8
#include <stdio.h>
// Tiled matrix multiply C = A * B for square, row-major N x N matrices with
// TW x TW (TW == BLOCKDIM_X) shared-memory tiles.
// Fast path (N divisible by TW and blockDim == BLOCKDIM_X x BLOCKDIM_X/8):
// each thread accumulates 8 outputs, one every TW/8 rows of its column.
// Fallback path: one output per thread, with tail handling when N is not a
// multiple of TW.
// NOTE(review): the fallback loads only blockDim.y rows of As/Bs per tile
// while the dot-product loop reads up to TW rows of Bs; it appears to
// assume a square TW x TW block in that configuration — confirm.
__global__ void matMul(int N, _DOUBLE_ *C, _DOUBLE_ *A, _DOUBLE_ *B) {
    __shared__ double As[TW][TW], Bs[TW][TW];
    int ty = threadIdx.y, tx = threadIdx.x;
    int by = blockIdx.y, bx = blockIdx.x;
    // Eight per-thread accumulators for rows I, I+TW/8, ..., I+7*TW/8;
    // only Cij is used by the fallback path.
    double Cij = 0;
    double Cij_4 = 0;
    double Cij_8 = 0;
    double Cij_12 = 0;
    double Cij_16 = 0;
    double Cij_20 = 0;
    double Cij_24 = 0;
    double Cij_28 = 0;
    if(N%TW || BLOCKDIM_X != BLOCKDIM_Y*8)
    {
        // Clamp so threads past the matrix edge load a valid (duplicated)
        // element instead of reading out of bounds; duplicate writes to
        // C[N-1][*] store identical values.
        int I = min(N-1,by*TW + ty);
        int J= min(N-1,bx*TW + tx);
        if((I < N) && (J < N)) {
            #pragma unroll
            for (int kk=0; kk<(N/TW + int(bool(N%TW))); kk++)
            {
                // Stage one slice of A's row and B's column via the
                // read-only data cache.
                As[ty][tx] = __ldg(&A[I*N + kk*TW + tx]);
                Bs[ty][tx] = __ldg(&B[(kk*TW+ty)*N + J]);
                __syncthreads();
                // Partial dot product over the (possibly partial) tile.
                #pragma unroll
                for (int k=0; k<min(TW,N-kk*TW); k++)
                {
                    Cij += As[ty][k] * Bs[k][tx];
                }
                __syncthreads();
            }
            C[I*N + J] = Cij;
        }
    }
    else
    {
        int I = by*TW + ty;
        int J = bx*TW + tx;
        if((I < N) && (J < N)) {
            #pragma unroll
            for (int kk=0; kk<N/TW; kk++)
            {
                // Each thread stages 8 rows of the A and B tiles, TW/8
                // apart, so a BLOCKDIM_X x BLOCKDIM_X/8 block fills the
                // whole TW x TW tile.
                As[ty][tx] = __ldg(&A[I*N + kk*TW + tx]);
                Bs[ty][tx] = __ldg(&B[(kk*TW+ty)*N + J]);
                As[ty+(TW/8)][tx] = __ldg(&A[(I+(TW/8))*N + kk*TW + tx]);
                Bs[ty+(TW/8)][tx] = __ldg(&B[(kk*TW+ty+(TW/8))*N + J]);
                As[ty+(TW/4)][tx] = __ldg(&A[(I+(TW/4))*N + kk*TW+tx]);
                Bs[ty+(TW/4)][tx] = __ldg(&B[(kk*TW+ty+(TW/4))*N + J]);
                As[ty+(3*TW/8)][tx] = __ldg(&A[(I+(3*TW/8))*N + kk*TW+tx]);
                Bs[ty+(3*TW/8)][tx] = __ldg(&B[(kk*TW+ty+(3*TW/8))*N + J]);
                As[ty+(TW/2)][tx] = __ldg(&A[(I+(TW/2))*N + kk*TW+tx]);
                Bs[ty+(TW/2)][tx] = __ldg(&B[(kk*TW+ty+(TW/2))*N + J]);
                As[ty+(5*TW/8)][tx] = __ldg(&A[(I+(5*TW/8))*N + kk*TW+tx]);
                Bs[ty+(5*TW/8)][tx] = __ldg(&B[(kk*TW+ty+(5*TW/8))*N + J]);
                As[ty+(3*TW/4)][tx] = __ldg(&A[(I+(3*TW/4))*N + kk*TW+tx]);
                Bs[ty+(3*TW/4)][tx] = __ldg(&B[(kk*TW+ty+(3*TW/4))*N + J]);
                As[ty+(7*TW/8)][tx] = __ldg(&A[(I+(7*TW/8))*N + kk*TW+tx]);
                Bs[ty+(7*TW/8)][tx] = __ldg(&B[(kk*TW+ty+(7*TW/8))*N + J]);
                __syncthreads();
                // Accumulate 8 dot products sharing the same Bs column.
                #pragma unroll
                for (int k=0; k<TW; k++)
                {
                    Cij += As[ty][k] * Bs[k][tx];
                    Cij_4 += As[ty+(TW/8)][k] * Bs[k][tx];
                    Cij_8 += As[ty+(TW/4)][k] * Bs[k][tx];
                    Cij_12 += As[ty+(3*TW/8)][k] * Bs[k][tx];
                    Cij_16 += As[ty+(TW/2)][k] * Bs[k][tx];
                    Cij_20 += As[ty+(5*TW/8)][k] * Bs[k][tx];
                    Cij_24 += As[ty+(3*TW/4)][k] * Bs[k][tx];
                    Cij_28 += As[ty+(7*TW/8)][k] * Bs[k][tx];
                }
                __syncthreads();
            }
            // Write all 8 results for this thread's column J.
            C[I*N + J] = Cij;
            C[(I+(TW/8))*N + J] = Cij_4;
            C[(I+(TW/4))*N + J] = Cij_8;
            C[(I+(3*TW/8))*N + J] = Cij_12;
            C[(I+(TW/2))*N + J] = Cij_16;
            C[(I+(5*TW/8))*N + J] = Cij_20;
            C[(I+(3*TW/4))*N + J] = Cij_24;
            C[(I+(7*TW/8))*N + J] = Cij_28;
        }
    }
}
| 2bc57626765a4a4173d8d1409d2b5ff1588924b3.cu | // Matrix multiply device code
#include <assert.h>
#include <math.h>
#include "utils.h"
#include "types.h"
using namespace std;
#define TW BLOCKDIM_X//32
//#define TWx 32
//#define TWy 8
#include <stdio.h>
// Tiled matrix multiply C = A * B for square, row-major N x N matrices with
// TW x TW (TW == BLOCKDIM_X) shared-memory tiles.
// Fast path (N divisible by TW and blockDim == BLOCKDIM_X x BLOCKDIM_X/8):
// each thread accumulates 8 outputs, one every TW/8 rows of its column.
// Fallback path: one output per thread, with tail handling when N is not a
// multiple of TW.
// NOTE(review): the fallback loads only blockDim.y rows of As/Bs per tile
// while the dot-product loop reads up to TW rows of Bs; it appears to
// assume a square TW x TW block in that configuration — confirm.
__global__ void matMul(int N, _DOUBLE_ *C, _DOUBLE_ *A, _DOUBLE_ *B) {
    __shared__ double As[TW][TW], Bs[TW][TW];
    int ty = threadIdx.y, tx = threadIdx.x;
    int by = blockIdx.y, bx = blockIdx.x;
    // Eight per-thread accumulators for rows I, I+TW/8, ..., I+7*TW/8;
    // only Cij is used by the fallback path.
    double Cij = 0;
    double Cij_4 = 0;
    double Cij_8 = 0;
    double Cij_12 = 0;
    double Cij_16 = 0;
    double Cij_20 = 0;
    double Cij_24 = 0;
    double Cij_28 = 0;
    if(N%TW || BLOCKDIM_X != BLOCKDIM_Y*8)
    {
        // Clamp so threads past the matrix edge load a valid (duplicated)
        // element instead of reading out of bounds; duplicate writes to
        // C[N-1][*] store identical values.
        int I = min(N-1,by*TW + ty);
        int J= min(N-1,bx*TW + tx);
        if((I < N) && (J < N)) {
            #pragma unroll
            for (int kk=0; kk<(N/TW + int(bool(N%TW))); kk++)
            {
                // Stage one slice of A's row and B's column via the
                // read-only data cache.
                As[ty][tx] = __ldg(&A[I*N + kk*TW + tx]);
                Bs[ty][tx] = __ldg(&B[(kk*TW+ty)*N + J]);
                __syncthreads();
                // Partial dot product over the (possibly partial) tile.
                #pragma unroll
                for (int k=0; k<min(TW,N-kk*TW); k++)
                {
                    Cij += As[ty][k] * Bs[k][tx];
                }
                __syncthreads();
            }
            C[I*N + J] = Cij;
        }
    }
    else
    {
        int I = by*TW + ty;
        int J = bx*TW + tx;
        if((I < N) && (J < N)) {
            #pragma unroll
            for (int kk=0; kk<N/TW; kk++)
            {
                // Each thread stages 8 rows of the A and B tiles, TW/8
                // apart, so a BLOCKDIM_X x BLOCKDIM_X/8 block fills the
                // whole TW x TW tile.
                As[ty][tx] = __ldg(&A[I*N + kk*TW + tx]);
                Bs[ty][tx] = __ldg(&B[(kk*TW+ty)*N + J]);
                As[ty+(TW/8)][tx] = __ldg(&A[(I+(TW/8))*N + kk*TW + tx]);
                Bs[ty+(TW/8)][tx] = __ldg(&B[(kk*TW+ty+(TW/8))*N + J]);
                As[ty+(TW/4)][tx] = __ldg(&A[(I+(TW/4))*N + kk*TW+tx]);
                Bs[ty+(TW/4)][tx] = __ldg(&B[(kk*TW+ty+(TW/4))*N + J]);
                As[ty+(3*TW/8)][tx] = __ldg(&A[(I+(3*TW/8))*N + kk*TW+tx]);
                Bs[ty+(3*TW/8)][tx] = __ldg(&B[(kk*TW+ty+(3*TW/8))*N + J]);
                As[ty+(TW/2)][tx] = __ldg(&A[(I+(TW/2))*N + kk*TW+tx]);
                Bs[ty+(TW/2)][tx] = __ldg(&B[(kk*TW+ty+(TW/2))*N + J]);
                As[ty+(5*TW/8)][tx] = __ldg(&A[(I+(5*TW/8))*N + kk*TW+tx]);
                Bs[ty+(5*TW/8)][tx] = __ldg(&B[(kk*TW+ty+(5*TW/8))*N + J]);
                As[ty+(3*TW/4)][tx] = __ldg(&A[(I+(3*TW/4))*N + kk*TW+tx]);
                Bs[ty+(3*TW/4)][tx] = __ldg(&B[(kk*TW+ty+(3*TW/4))*N + J]);
                As[ty+(7*TW/8)][tx] = __ldg(&A[(I+(7*TW/8))*N + kk*TW+tx]);
                Bs[ty+(7*TW/8)][tx] = __ldg(&B[(kk*TW+ty+(7*TW/8))*N + J]);
                __syncthreads();
                // Accumulate 8 dot products sharing the same Bs column.
                #pragma unroll
                for (int k=0; k<TW; k++)
                {
                    Cij += As[ty][k] * Bs[k][tx];
                    Cij_4 += As[ty+(TW/8)][k] * Bs[k][tx];
                    Cij_8 += As[ty+(TW/4)][k] * Bs[k][tx];
                    Cij_12 += As[ty+(3*TW/8)][k] * Bs[k][tx];
                    Cij_16 += As[ty+(TW/2)][k] * Bs[k][tx];
                    Cij_20 += As[ty+(5*TW/8)][k] * Bs[k][tx];
                    Cij_24 += As[ty+(3*TW/4)][k] * Bs[k][tx];
                    Cij_28 += As[ty+(7*TW/8)][k] * Bs[k][tx];
                }
                __syncthreads();
            }
            // Write all 8 results for this thread's column J.
            C[I*N + J] = Cij;
            C[(I+(TW/8))*N + J] = Cij_4;
            C[(I+(TW/4))*N + J] = Cij_8;
            C[(I+(3*TW/8))*N + J] = Cij_12;
            C[(I+(TW/2))*N + J] = Cij_16;
            C[(I+(5*TW/8))*N + J] = Cij_20;
            C[(I+(3*TW/4))*N + J] = Cij_24;
            C[(I+(7*TW/8))*N + J] = Cij_28;
        }
    }
}
|
d3b55dd172990986122e8d7584ecd3ac7b8c8de2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include"k.h"
#include<math.h>
#include </usr/local/cuda/include/cuda.h>
#define CMCPYHTD hipMemcpyHostToDevice
#define CMCPYDTH hipMemcpyDeviceToHost
#define BLOCK_WIDTH 16
// Note: I've adapted this from https://github.com/MTB90
// He has also done a "block version" which performs up to 4xfaster
// for very large matrices. Perhaps this could be adapted too one day
// I have mostly stripped out a lot of the code, cut out the preceeding
// matrix stuff, and tried to simplify things
extern "C" K gpu_floydwarshall(K matrix);
/**Dummy kernel whose only purpose is to force lazy device/context
 * initialisation before the timed work starts.
 *
 * @param reps upper bound checked against the thread index; no work is done
 */
__global__ void wake_gpu_kernel(int reps)
{
    I tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < reps)
    {
        // intentionally empty
    }
}
/**One relaxation step of parallel Floyd-Warshall: for a fixed intermediate
 * vertex u, every thread relaxes one (row, col) pair.
 *
 * @param u intermediate vertex through which paths are relaxed
 * @param n number of vertices in the graph G:=(V,E), n := |V(G)|
 * @param d flattened n x n matrix of current shortest path lengths
 */
__global__ void fw_kernel(const unsigned int u, const unsigned int n, int * const d)
{
    I row = blockDim.y * blockIdx.y + threadIdx.y;
    I col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < n && col < n)
    {
        // Candidate path row -> u -> col versus the current row -> col.
        I viaU = d[row * n + u] + d[u * n + col];
        I direct = d[row * n + col];
        if (direct > viaU)
        {
            d[row * n + col] = viaU;
        }
    }
}
/**All-pairs shortest paths via Floyd-Warshall on the GPU.
 *
 * @param matrix K int list holding a flattened V x V adjacency matrix;
 *               overwritten in place with shortest-path distances
 * @return the same K object (reference count bumped via r1)
 *
 * Fixed: the stream created here was never destroyed (resource leak), and a
 * host buffer `d` was malloc'd and freed without ever being used.
 */
K gpu_floydwarshall(K matrix)
{
    // The list stores V*V ints; recover the vertex count.
    unsigned int V = sqrt(matrix->n);
    unsigned int n = V;
    I *dev_d = 0;
    hipStream_t cpyStream;
    // Choose which GPU to run on, change this on a multi-GPU system.
    hipSetDevice(0);
    // One thread per matrix cell, BLOCK_WIDTH x BLOCK_WIDTH blocks.
    dim3 dimGrid((n - 1) / BLOCK_WIDTH + 1, (n - 1) / BLOCK_WIDTH + 1);
    dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
    // Create new stream to copy data
    hipStreamCreate(&cpyStream);
    // Allocate GPU buffer for the matrix of shortest paths
    hipMalloc((void**)&dev_d, n * n * sizeof(int));
    // Wake up gpu
    hipLaunchKernelGGL(( wake_gpu_kernel), dim3(1), dim3(dimBlock), 0, 0, 32);
    // Copy input from host memory to GPU buffers; the K payload itself is
    // used as the host buffer.
    I *host_memoryd = (int*)&(kI(matrix)[0]);
    hipMemcpyAsync(dev_d, host_memoryd, n * n * sizeof(int), CMCPYHTD, cpyStream);
    // Wait for the async copy (and wake-up kernel) to finish.
    hipDeviceSynchronize();
    // set preference for larger L1 cache and smaller shared memory
    hipFuncSetCacheConfig(fw_kernel, hipFuncCachePreferL1 );
    // Relax through each intermediate vertex u in turn.
    for (int u = 0; u <= (n-1); ++u)
    {
        hipLaunchKernelGGL(( fw_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, u, n, dev_d);
    }
    // Check for any errors launching the kernel
    hipGetLastError();
    // Copy the result back into the K object's payload (synchronous).
    hipMemcpy(host_memoryd, dev_d, n * n * sizeof(int), CMCPYDTH);
    // Release device resources.
    hipStreamDestroy(cpyStream);
    hipFree(dev_d);
    R r1(matrix);
}
| d3b55dd172990986122e8d7584ecd3ac7b8c8de2.cu | #include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include"k.h"
#include<math.h>
#include </usr/local/cuda/include/cuda.h>
#define CMCPYHTD cudaMemcpyHostToDevice
#define CMCPYDTH cudaMemcpyDeviceToHost
#define BLOCK_WIDTH 16
// Note: I've adapted this from https://github.com/MTB90
// He has also done a "block version" which performs up to 4xfaster
// for very large matrices. Perhaps this could be adapted too one day
// I have mostly stripped out a lot of the code, cut out the preceeding
// matrix stuff, and tried to simplify things
extern "C" K gpu_floydwarshall(K matrix);
/**Dummy kernel whose only purpose is to force lazy device/context
 * initialisation before the timed work starts.
 *
 * @param reps upper bound checked against the thread index; no work is done
 */
__global__ void wake_gpu_kernel(int reps)
{
    I tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < reps)
    {
        // intentionally empty
    }
}
/**One relaxation step of parallel Floyd-Warshall: for a fixed intermediate
 * vertex u, every thread relaxes one (row, col) pair.
 *
 * @param u intermediate vertex through which paths are relaxed
 * @param n number of vertices in the graph G:=(V,E), n := |V(G)|
 * @param d flattened n x n matrix of current shortest path lengths
 */
__global__ void fw_kernel(const unsigned int u, const unsigned int n, int * const d)
{
    I row = blockDim.y * blockIdx.y + threadIdx.y;
    I col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < n && col < n)
    {
        // Candidate path row -> u -> col versus the current row -> col.
        I viaU = d[row * n + u] + d[u * n + col];
        I direct = d[row * n + col];
        if (direct > viaU)
        {
            d[row * n + col] = viaU;
        }
    }
}
/**All-pairs shortest paths via Floyd-Warshall on the GPU.
 *
 * @param matrix K int list holding a flattened V x V adjacency matrix;
 *               overwritten in place with shortest-path distances
 * @return the same K object (reference count bumped via r1)
 *
 * Fixed: the stream created here was never destroyed (resource leak), and a
 * host buffer `d` was malloc'd and freed without ever being used.
 */
K gpu_floydwarshall(K matrix)
{
    // The list stores V*V ints; recover the vertex count.
    unsigned int V = sqrt(matrix->n);
    unsigned int n = V;
    I *dev_d = 0;
    cudaStream_t cpyStream;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaSetDevice(0);
    // One thread per matrix cell, BLOCK_WIDTH x BLOCK_WIDTH blocks.
    dim3 dimGrid((n - 1) / BLOCK_WIDTH + 1, (n - 1) / BLOCK_WIDTH + 1);
    dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
    // Create new stream to copy data
    cudaStreamCreate(&cpyStream);
    // Allocate GPU buffer for the matrix of shortest paths
    cudaMalloc((void**)&dev_d, n * n * sizeof(int));
    // Wake up gpu
    wake_gpu_kernel<<<1, dimBlock>>>(32);
    // Copy input from host memory to GPU buffers; the K payload itself is
    // used as the host buffer.
    I *host_memoryd = (int*)&(kI(matrix)[0]);
    cudaMemcpyAsync(dev_d, host_memoryd, n * n * sizeof(int), CMCPYHTD, cpyStream);
    // Wait for the async copy (and wake-up kernel) to finish.
    cudaDeviceSynchronize();
    // set preference for larger L1 cache and smaller shared memory
    cudaFuncSetCacheConfig(fw_kernel, cudaFuncCachePreferL1 );
    // Relax through each intermediate vertex u in turn.
    for (int u = 0; u <= (n-1); ++u)
    {
        fw_kernel<<<dimGrid, dimBlock>>>(u, n, dev_d);
    }
    // Check for any errors launching the kernel
    cudaGetLastError();
    // Copy the result back into the K object's payload (synchronous).
    cudaMemcpy(host_memoryd, dev_d, n * n * sizeof(int), CMCPYDTH);
    // Release device resources.
    cudaStreamDestroy(cpyStream);
    cudaFree(dev_d);
    R r1(matrix);
}
|
7487dd7e074302a58bcaa72e52acf4e72429ae1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include "oneflow/core/ndarray/ndarray_reduce_impl.h"
#include "oneflow/core/ndarray/binary_func.h"
#include "oneflow/core/common/preprocessor.h"
#include "oneflow/core/common/shape.h"
#include "oneflow/core/common/permutation_iterator.h"
namespace cub {

// Binary functor for product reductions (complement of cub::Sum).
struct Prod {
  template<typename T>
  __host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const {
    return a * b;
  }
};

// Binary functor for logical-or reductions ("any element truthy").
struct Any {
  template<typename T>
  __host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const {
    return a || b;
  }
};

// Binary functor for logical-and reductions ("all elements truthy").
struct All {
  template<typename T>
  __host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const {
    return a && b;
  }
};

}  // namespace cub
namespace oneflow {
namespace {
// Column reduction of a (num_elems / num_cols) x num_cols row-major matrix:
// each thread owns one column and walks down it with stride num_cols,
// folding elements with the binary functor R. Used when the row count is
// too small to justify the warp-block variant.
template<template<typename> class R, typename T, typename K>
__global__ void MatrixColReduceBy1ThreadPerColumn(K num_elems, K num_cols, const T* in, T* out) {
  CUDA_1D_KERNEL_LOOP_T(K, col, num_cols) {
    T acc = in[col];
    for (K i = col + num_cols; i < num_elems; i += num_cols) {
      acc = R<T>::Invoke(acc, in[i]);
    }
    out[col] = acc;
  }
}
// Union wrapper giving every element at least the size/alignment of an
// int32_t — presumably so sub-4-byte T (e.g. half) tiles shared memory
// without bank-unfriendly packing; confirm before relying on it.
template<typename T>
struct WithAlign2 {
  union {
    T value;
    int32_t padding;
  };
};
// Column reduction with one warp lane per column and blockDim.x/32 "thread
// rows" per block: each (thread_row, thread_col) accumulates a strided
// partial over the matrix rows into shared memory, then the first thread
// row folds the partials of its column and writes the result.
template<template<typename> class R, typename T, typename K>
__global__ void MatrixColReduceByWarpBlock(K num_elems, K num_cols, const T* in, T* out) {
  const K thread_col = threadIdx.x % kCudaWarpSize;
  const K thread_row = threadIdx.x / kCudaWarpSize;
  const K thread_dim_row = blockDim.x / kCudaWarpSize;
  const K num_valid_threads = thread_dim_row * num_cols;  // ASSERT: always <= num_elems
  // Each block covers kCudaWarpSize consecutive columns.
  const K col = blockIdx.x * kCudaWarpSize + thread_col;
  __shared__ WithAlign2<T> partial_values[kCudaWarpSize * kCudaWarpSize];
  if (col < num_cols) {
    // Walk rows thread_row, thread_row + thread_dim_row, ... of this column.
    K index = thread_row * num_cols + col;
    T val = in[index];
    for (index += num_valid_threads; index < num_elems; index += num_valid_threads) {
      val = R<T>::Invoke(val, in[index]);
    }
    partial_values[threadIdx.x].value = val;
  }
  __syncthreads();
  // First thread row combines the per-thread-row partials of each column.
  if (col < num_cols && thread_row == 0) {
    int index = thread_col;
    T val = partial_values[index].value;
    for (index += kCudaWarpSize; index < blockDim.x; index += kCudaWarpSize) {
      val = R<T>::Invoke(val, partial_values[index].value);
    }
    out[col] = val;
  }
}
// Single-pass column reduction dispatcher: for short matrices fall back to
// one thread per column; otherwise launch the warp-block kernel with one
// warp-width group of columns per block.
template<template<typename> class R, typename T, typename K>
void MatrixColReduceBy1BlockLayer(DeviceCtx* ctx, K num_elems, K num_cols, const T* in, T* out) {
  CHECK_LE(num_cols, kCudaMaxBlocksNum * kCudaWarpSize);
  const K num_rows = num_elems / num_cols;
  CHECK_GT(num_rows, 0);
  if (num_rows < kCudaWarpSize) {
    // Too few rows for the warp-block scheme to pay off.
    RUN_CUDA_KERNEL((MatrixColReduceBy1ThreadPerColumn<R, T, K>), ctx, num_cols, num_elems,
                    num_cols, in, out);
  } else {
    const int num_blocks = (num_cols + kCudaWarpSize - 1) / kCudaWarpSize;
    const int num_threads = kCudaWarpSize * kCudaWarpSize;
    auto Reduce = &MatrixColReduceByWarpBlock<R, T, K>;
    hipLaunchKernelGGL(( Reduce), dim3(num_blocks), dim3(num_threads), 0, ctx->cuda_stream(), num_elems, num_cols, in, out);
  }
}
// Thresholds deciding when a single MatrixColReduceBy1BlockLayer pass
// suffices versus the multi-pass scheme in MatrixColReduceK.
const static int32_t kNumRows4OneBlockLayer = kCudaWarpSize * kCudaWarpSize;
const static int32_t kNumCols4OneBlockLayer = kCudaMaxBlocksNum * kCudaWarpSize / 2;
// Multi-pass column reduction: when the matrix is tall enough, fold
// 2^scale_shift row groups at once (treating the matrix as having
// num_cols << scale_shift columns) into tmp, then recurse on the much
// shorter tmp matrix until a single pass suffices.
template<template<typename> class R, typename T, typename K>
void MatrixColReduceK(DeviceCtx* ctx, K num_rows, K num_cols, const T* in, T* out, T* tmp) {
  K num_elems = num_rows * num_cols;
  if (num_rows < kNumRows4OneBlockLayer || num_cols > kNumCols4OneBlockLayer) {
    MatrixColReduceBy1BlockLayer<R, T, K>(ctx, num_elems, num_cols, in, out);
  } else {
    // Pick the largest power-of-two widening that keeps the pass within the
    // single-layer row/column limits.
    int scale_shift = 1;
    for (; true; ++scale_shift) {
      if ((num_rows >> scale_shift) < kNumRows4OneBlockLayer) { break; }
      if ((num_cols << scale_shift) > kNumCols4OneBlockLayer) { break; }
    }
    MatrixColReduceBy1BlockLayer<R, T, K>(ctx, num_elems, (num_cols << scale_shift), in, tmp);
    // recursively calls MatrixColReduceK(...) log32(num_rows) times at most
    MatrixColReduceK<R, T, K>(ctx, (1 << scale_shift), num_cols, tmp, out, tmp);
  }
}
// Entry point for column reduction: dispatch to the 32-bit-index kernel
// instantiation when the element count fits (faster index arithmetic),
// otherwise use 64-bit indices.
template<template<typename> class R, typename T>
void MatrixColReduce(DeviceCtx* ctx, int64_t num_rows, int64_t num_cols, const T* in, T* out,
                     T* tmp) {
  if (!IsKernelSafeInt32(num_rows * num_cols)) {
    return MatrixColReduceK<R, T, int64_t>(ctx, num_rows, num_cols, in, out, tmp);
  }
  return MatrixColReduceK<R, T, int32_t>(ctx, num_rows, num_cols, in, out, tmp);
}
} // namespace
// Maps oneflow's BinaryFuncXxx tags to the matching cub functor type
// (cub::Sum, cub::Prod, ... including the custom ones defined above).
// NOTE: the "Bianry" misspelling is part of the public name; do not rename.
template<typename T, template<typename> class binary_func>
struct CubFunctor4BianryFunc;
// One specialization per name in REDUCE_BINARY_FUNC_NAME_SEQ.
#define SPECIALIZE_CUB_FUNCTOR_4_BINARY_FUNC(func_name) \
  template<typename T>                                  \
  struct CubFunctor4BianryFunc<T, BinaryFunc##func_name> final { \
    using type = cub::func_name;                        \
  };
OF_PP_FOR_EACH_ATOMIC(SPECIALIZE_CUB_FUNCTOR_4_BINARY_FUNC, REDUCE_BINARY_FUNC_NAME_SEQ);
#undef SPECIALIZE_CUB_FUNCTOR_4_BINARY_FUNC
// Maps a segment index to its starting element offset (row i -> i * num_cols);
// used with cub::TransformInputIterator as the segment-offset iterator for
// cub::DeviceSegmentedReduce.
struct RowOffsetFunctor final {
  OF_DEVICE_FUNC explicit RowOffsetFunctor(int32_t num_cols) : num_cols_(num_cols) {}
  OF_DEVICE_FUNC int32_t operator()(const int32_t& x) const { return x * num_cols_; }
  int32_t num_cols_;  // elements per segment
};
// Reduces the entire ndarray x to the single-element output y using
// cub::DeviceReduce with the functor mapped from binary_func.
template<typename T, template<typename> class binary_func>
struct NdarrayScalarReduce<DeviceType::kGPU, T, binary_func> final {
  // Applicable only when the output has exactly one element.
  static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
    return y.shape().ElemNum() == 1;
  }
  static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
                     const XpuVarNdarray<T>& tmp_storage) {
    CHECK(Matched(y, x));
    size_t x_size = x.shape().ElemNum();
    size_t tmp_storage_bytes = 0;
    // CUB two-phase call: first with nullptr to query tmp_storage_bytes,
    // then with the real scratch buffer to run the reduction.
    auto DoReduce = [&](T* tmp_storage_ptr) {
      int retcode =
          hipcub::DeviceReduce::Reduce(tmp_storage_ptr, tmp_storage_bytes, x.ptr(), y.ptr(), x_size,
                                    typename CubFunctor4BianryFunc<T, binary_func>::type(),
                                    UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
      // Fixed: message previously named DeviceSegmentedReduce, but this
      // call is DeviceReduce.
      CHECK_EQ(retcode, 0) << "hipcub::DeviceReduce::Reduce error";
    };
    DoReduce(nullptr);
    CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
    DoReduce(tmp_storage.ptr());
  }
};
// Row reduction of a 2D ndarray: (n, m) -> (n, 1). Each row is one CUB
// segment whose offsets come from RowOffsetFunctor through a transform
// iterator.
template<typename T, template<typename> class binary_func>
struct NdarrayMatrixRowReduce<DeviceType::kGPU, T, binary_func> final {
  // Requires int32-addressable output, rank-2 in/out, matching row count,
  // and a fully reduced second axis.
  static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
    if (y.shape().ElemNum() > GetMaxVal<int32_t>()) { return false; }
    if (x.shape().NumAxes() != 2) { return false; }
    if (y.shape().NumAxes() != 2) { return false; }
    return x.shape().At(0) == y.shape().At(0) && y.shape().At(1) == 1;
  }
  static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
                     const XpuVarNdarray<T>& tmp_storage) {
    CHECK(Matched(y, x));
    int32_t num_rows = y.shape().ElemNum();
    int32_t num_cols = x.shape().ElemNum() / y.shape().ElemNum();
    // Segment i begins at element i * num_cols; begin/end offset iterators
    // are the same transform iterator, shifted by one.
    RowOffsetFunctor get_row_offset(num_cols);
    hipcub::CountingInputIterator<int32_t> counting_intput_it(0);
    hipcub::TransformInputIterator<int32_t, RowOffsetFunctor, hipcub::CountingInputIterator<int32_t>>
        transform_input_iter(counting_intput_it, get_row_offset);
    size_t tmp_storage_bytes = 0;
    // CUB two-phase call: size query with nullptr, then the real run.
    auto DoReduce = [&](T* tmp_storage_ptr) {
      int retcode = hipcub::DeviceSegmentedReduce::Reduce(
          tmp_storage_ptr, tmp_storage_bytes, x.ptr(), y.ptr(), num_rows, transform_input_iter,
          transform_input_iter + 1, typename CubFunctor4BianryFunc<T, binary_func>::type(),
          UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
      CHECK_EQ(retcode, 0) << "hipcub::DeviceSegmentedReduce::Reduce error";
    };
    DoReduce(nullptr);
    CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
    DoReduce(tmp_storage.ptr());
  }
};
// Column reduction of a 2D ndarray: (n, m) -> (1, m). Small column counts
// use the custom multi-pass kernels; otherwise the matrix is viewed
// transposed through a permutation iterator so each column becomes a CUB
// segment.
template<typename T, template<typename> class binary_func>
struct NdarrayMatrixColReduce<DeviceType::kGPU, T, binary_func> final {
  // Requires int32-addressable output, rank-2 in/out, a fully reduced first
  // axis, and matching column counts.
  static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
    if (y.shape().ElemNum() > GetMaxVal<int32_t>()) { return false; }
    if (x.shape().NumAxes() != 2) { return false; }
    if (y.shape().NumAxes() != 2) { return false; }
    return y.shape().At(0) == 1 && x.shape().At(1) == y.shape().At(1);
  }
  // Index transform: flat index of the transposed view -> flat index of the
  // original row-major layout.
  struct XY2YXFunctor final {
    __host__ __device__ XY2YXFunctor(int32_t dim_x, int32_t dim_y) : dim_x_(dim_x), dim_y_(dim_y) {}
    __host__ __device__ int32_t operator()(const int32_t& idx) const {
      const int32_t y = idx / dim_x_;
      const int32_t x = idx % dim_x_;
      return x * dim_y_ + y;
    }
    int32_t dim_x_;
    int32_t dim_y_;
  };
  static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
                     const XpuVarNdarray<T>& tmp_storage) {
    CHECK(Matched(y, x));
    int64_t num_rows = x.shape().At(0);
    int64_t num_cols = x.shape().At(1);
    if (num_cols < kNumCols4OneBlockLayer) {
      // NOTE(review): host_ptr() is passed into the kernel-launching path
      // here — verify it aliases the device pointer in this build.
      return MatrixColReduce<binary_func, T>(ctx, num_rows, num_cols, x.host_ptr(), y.host_ptr(),
                                             tmp_storage.host_ptr());
    }
    // One segment per column; each segment has num_rows elements, so
    // segment offsets stride by num_rows in the transposed view.
    RowOffsetFunctor get_row_offset(num_rows);
    hipcub::CountingInputIterator<int32_t> counting_intput_it(0);
    hipcub::TransformInputIterator<int32_t, RowOffsetFunctor, hipcub::CountingInputIterator<int32_t>>
        transform_input_iter(counting_intput_it, get_row_offset);
    XY2YXFunctor xy2yx(x.shape().At(0), x.shape().At(1));
    using XY2YxIndexIter =
        hipcub::TransformInputIterator<int32_t, XY2YXFunctor, hipcub::CountingInputIterator<int32_t>>;
    XY2YxIndexIter xy2yx_iter(counting_intput_it, xy2yx);
    // Read x as if it were transposed, without materialising the transpose.
    PermutationIterator<const T, const T*, XY2YxIndexIter> x_iter(x.ptr(), xy2yx_iter);
    size_t tmp_storage_bytes = 0;
    // CUB two-phase call: size query with nullptr, then the real run.
    auto DoReduce = [&](T* tmp_storage_ptr) {
      int retcode = hipcub::DeviceSegmentedReduce::Reduce(
          tmp_storage_ptr, tmp_storage_bytes, x_iter, y.ptr(), num_cols, transform_input_iter,
          transform_input_iter + 1, typename CubFunctor4BianryFunc<T, binary_func>::type(),
          UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
      CHECK_EQ(retcode, 0) << "hipcub::DeviceSegmentedReduce::Reduce error";
    };
    DoReduce(nullptr);
    CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
    DoReduce(tmp_storage.ptr());
  }
};
template<typename T, template<typename> class binary_func>
struct NdarrayXYZCubeXZReduce<DeviceType::kGPU, T, binary_func> final {
static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
if (y.shape().ElemNum() > GetMaxVal<int32_t>()) { return false; }
if (x.shape().NumAxes() != 3) { return false; }
if (y.shape().NumAxes() != 3) { return false; }
return y.shape().At(0) == 1 && x.shape().At(1) == y.shape().At(1) && y.shape().At(2) == 1;
}
struct XYZ2YxzFunctor final {
__host__ __device__ XYZ2YxzFunctor(int32_t dim_x, int32_t dim_y, int32_t dim_z)
: dim_z_(dim_z), dim_xz_(dim_x * dim_z), dim_yz_(dim_y * dim_z) {}
__host__ __device__ int32_t operator()(const int32_t& idx) const {
const int32_t y = idx / dim_xz_;
const int32_t xz_idx = idx % dim_xz_;
const int32_t x = xz_idx / dim_z_;
const int32_t z = xz_idx % dim_z_;
return x * dim_yz_ + y * dim_z_ + z;
}
int32_t dim_z_;
int32_t dim_xz_;
int32_t dim_yz_;
};
static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
const XpuVarNdarray<T>& tmp_storage) {
CHECK(Matched(y, x));
int32_t num_rows = y.shape().ElemNum();
int32_t num_cols = x.shape().ElemNum() / y.shape().ElemNum();
RowOffsetFunctor get_row_offset(num_cols);
hipcub::CountingInputIterator<int32_t> counting_intput_it(0);
hipcub::TransformInputIterator<int32_t, RowOffsetFunctor, hipcub::CountingInputIterator<int32_t>>
transform_input_iter(counting_intput_it, get_row_offset);
XYZ2YxzFunctor xyz2yxz(x.shape().At(0), x.shape().At(1), x.shape().At(2));
using XYZ2YxzIndexIter =
hipcub::TransformInputIterator<int32_t, XYZ2YxzFunctor, hipcub::CountingInputIterator<int32_t>>;
XYZ2YxzIndexIter xyz2yxz_iter(counting_intput_it, xyz2yxz);
PermutationIterator<const T, const T*, XYZ2YxzIndexIter> x_iter(x.ptr(), xyz2yxz_iter);
size_t tmp_storage_bytes = 0;
auto DoReduce = [&](T* tmp_storage_ptr) {
int retcode = hipcub::DeviceSegmentedReduce::Reduce(
tmp_storage_ptr, tmp_storage_bytes, x_iter, y.ptr(), num_rows, transform_input_iter,
transform_input_iter + 1, typename CubFunctor4BianryFunc<T, binary_func>::type(),
UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
CHECK_EQ(retcode, 0) << "hipcub::DeviceSegmentedReduce::Reduce error";
};
DoReduce(nullptr);
CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
DoReduce(tmp_storage.ptr());
}
};
namespace {
template<typename T, int NDIMS, template<typename> class binary_func>
__global__ void NdarrayReduceGpuInplaceReduceAxis(const XpuReducedNdarray<T, NDIMS> dst_reduced,
const XpuReducedNdarray<T, NDIMS> x, int axis) {
NdarrayReduceCore<T, NDIMS, binary_func>::ReduceAxis(dst_reduced, x, axis);
}
} // namespace
template<typename T, int NDIMS, template<typename> class binary_func>
struct NdarrayReduceCoreWrapper<DeviceType::kGPU, T, NDIMS, binary_func> final {
static void ReduceAxis(DeviceCtx* ctx, const XpuReducedNdarray<T, NDIMS>& dst_reduced,
const XpuReducedNdarray<T, NDIMS>& x, int axis) {
size_t n = x.host_shape().HostElemNum();
RUN_CUDA_KERNEL((NdarrayReduceGpuInplaceReduceAxis<T, NDIMS, binary_func>), ctx, n, dst_reduced,
x, axis);
}
};
#define INSTANTIATE_NDARRAY_REDUCE_IMPL(dtype, binary_func) \
template struct NdarrayScalarReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>; \
template struct NdarrayMatrixRowReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>; \
template struct NdarrayMatrixColReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>; \
template struct NdarrayXYZCubeXZReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_REDUCE_IMPL,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ,
REDUCE_BINARY_FUNC_SEQ);
#define INSTANTIATE_NDARRAY_REDUCE_CORE_WRAPPER(dtype_pair, NDIMS, binary_func) \
template struct NdarrayReduceCoreWrapper<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype_pair), NDIMS, \
binary_func>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_REDUCE_CORE_WRAPPER,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, DIM_SEQ,
REDUCE_BINARY_FUNC_SEQ);
} // namespace oneflow
| 7487dd7e074302a58bcaa72e52acf4e72429ae1c.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cub/cub.cuh>
#include "oneflow/core/ndarray/ndarray_reduce_impl.h"
#include "oneflow/core/ndarray/binary_func.h"
#include "oneflow/core/common/preprocessor.h"
#include "oneflow/core/common/shape.h"
#include "oneflow/core/common/permutation_iterator.h"
namespace cub {
struct Prod {
template<typename T>
__host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const {
return a * b;
}
};
struct Any {
template<typename T>
__host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const {
return a || b;
}
};
struct All {
template<typename T>
__host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const {
return a && b;
}
};
} // namespace cub
namespace oneflow {
namespace {
template<template<typename> class R, typename T, typename K>
__global__ void MatrixColReduceBy1ThreadPerColumn(K num_elems, K num_cols, const T* in, T* out) {
CUDA_1D_KERNEL_LOOP_T(K, j, num_cols) {
K index = j;
T sum = in[index];
for (index += num_cols; index < num_elems; index += num_cols) {
sum = R<T>::Invoke(sum, in[index]);
}
out[j] = sum;
}
}
template<typename T>
struct WithAlign2 {
union {
T value;
int32_t padding;
};
};
template<template<typename> class R, typename T, typename K>
__global__ void MatrixColReduceByWarpBlock(K num_elems, K num_cols, const T* in, T* out) {
const K thread_col = threadIdx.x % kCudaWarpSize;
const K thread_row = threadIdx.x / kCudaWarpSize;
const K thread_dim_row = blockDim.x / kCudaWarpSize;
const K num_valid_threads = thread_dim_row * num_cols; // ASSERT: always <= num_elems
const K col = blockIdx.x * kCudaWarpSize + thread_col;
__shared__ WithAlign2<T> partial_values[kCudaWarpSize * kCudaWarpSize];
if (col < num_cols) {
K index = thread_row * num_cols + col;
T val = in[index];
for (index += num_valid_threads; index < num_elems; index += num_valid_threads) {
val = R<T>::Invoke(val, in[index]);
}
partial_values[threadIdx.x].value = val;
}
__syncthreads();
if (col < num_cols && thread_row == 0) {
int index = thread_col;
T val = partial_values[index].value;
for (index += kCudaWarpSize; index < blockDim.x; index += kCudaWarpSize) {
val = R<T>::Invoke(val, partial_values[index].value);
}
out[col] = val;
}
}
template<template<typename> class R, typename T, typename K>
void MatrixColReduceBy1BlockLayer(DeviceCtx* ctx, K num_elems, K num_cols, const T* in, T* out) {
CHECK_LE(num_cols, kCudaMaxBlocksNum * kCudaWarpSize);
const K num_rows = num_elems / num_cols;
CHECK_GT(num_rows, 0);
if (num_rows < kCudaWarpSize) {
RUN_CUDA_KERNEL((MatrixColReduceBy1ThreadPerColumn<R, T, K>), ctx, num_cols, num_elems,
num_cols, in, out);
} else {
const int num_blocks = (num_cols + kCudaWarpSize - 1) / kCudaWarpSize;
const int num_threads = kCudaWarpSize * kCudaWarpSize;
auto Reduce = &MatrixColReduceByWarpBlock<R, T, K>;
Reduce<<<num_blocks, num_threads, 0, ctx->cuda_stream()>>>(num_elems, num_cols, in, out);
}
}
const static int32_t kNumRows4OneBlockLayer = kCudaWarpSize * kCudaWarpSize;
const static int32_t kNumCols4OneBlockLayer = kCudaMaxBlocksNum * kCudaWarpSize / 2;
template<template<typename> class R, typename T, typename K>
void MatrixColReduceK(DeviceCtx* ctx, K num_rows, K num_cols, const T* in, T* out, T* tmp) {
K num_elems = num_rows * num_cols;
if (num_rows < kNumRows4OneBlockLayer || num_cols > kNumCols4OneBlockLayer) {
MatrixColReduceBy1BlockLayer<R, T, K>(ctx, num_elems, num_cols, in, out);
} else {
int scale_shift = 1;
for (; true; ++scale_shift) {
if ((num_rows >> scale_shift) < kNumRows4OneBlockLayer) { break; }
if ((num_cols << scale_shift) > kNumCols4OneBlockLayer) { break; }
}
MatrixColReduceBy1BlockLayer<R, T, K>(ctx, num_elems, (num_cols << scale_shift), in, tmp);
// recursively calls MatrixColReduceK(...) log32(num_rows) times at most
MatrixColReduceK<R, T, K>(ctx, (1 << scale_shift), num_cols, tmp, out, tmp);
}
}
template<template<typename> class R, typename T>
void MatrixColReduce(DeviceCtx* ctx, int64_t num_rows, int64_t num_cols, const T* in, T* out,
T* tmp) {
if (IsKernelSafeInt32(num_rows * num_cols)) {
return MatrixColReduceK<R, T, int32_t>(ctx, num_rows, num_cols, in, out, tmp);
} else {
return MatrixColReduceK<R, T, int64_t>(ctx, num_rows, num_cols, in, out, tmp);
}
}
} // namespace
template<typename T, template<typename> class binary_func>
struct CubFunctor4BianryFunc;
#define SPECIALIZE_CUB_FUNCTOR_4_BINARY_FUNC(func_name) \
template<typename T> \
struct CubFunctor4BianryFunc<T, BinaryFunc##func_name> final { \
using type = cub::func_name; \
};
OF_PP_FOR_EACH_ATOMIC(SPECIALIZE_CUB_FUNCTOR_4_BINARY_FUNC, REDUCE_BINARY_FUNC_NAME_SEQ);
#undef SPECIALIZE_CUB_FUNCTOR_4_BINARY_FUNC
struct RowOffsetFunctor final {
OF_DEVICE_FUNC explicit RowOffsetFunctor(int32_t num_cols) : num_cols_(num_cols) {}
OF_DEVICE_FUNC int32_t operator()(const int32_t& x) const { return x * num_cols_; }
int32_t num_cols_;
};
template<typename T, template<typename> class binary_func>
struct NdarrayScalarReduce<DeviceType::kGPU, T, binary_func> final {
static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
return y.shape().ElemNum() == 1;
}
static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
const XpuVarNdarray<T>& tmp_storage) {
CHECK(Matched(y, x));
size_t x_size = x.shape().ElemNum();
size_t tmp_storage_bytes = 0;
auto DoReduce = [&](T* tmp_storage_ptr) {
int retcode =
cub::DeviceReduce::Reduce(tmp_storage_ptr, tmp_storage_bytes, x.ptr(), y.ptr(), x_size,
typename CubFunctor4BianryFunc<T, binary_func>::type(),
UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
CHECK_EQ(retcode, 0) << "cub::DeviceSegmentedReduce::Reduce error";
};
DoReduce(nullptr);
CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
DoReduce(tmp_storage.ptr());
}
};
template<typename T, template<typename> class binary_func>
struct NdarrayMatrixRowReduce<DeviceType::kGPU, T, binary_func> final {
static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
if (y.shape().ElemNum() > GetMaxVal<int32_t>()) { return false; }
if (x.shape().NumAxes() != 2) { return false; }
if (y.shape().NumAxes() != 2) { return false; }
return x.shape().At(0) == y.shape().At(0) && y.shape().At(1) == 1;
}
static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
const XpuVarNdarray<T>& tmp_storage) {
CHECK(Matched(y, x));
int32_t num_rows = y.shape().ElemNum();
int32_t num_cols = x.shape().ElemNum() / y.shape().ElemNum();
RowOffsetFunctor get_row_offset(num_cols);
cub::CountingInputIterator<int32_t> counting_intput_it(0);
cub::TransformInputIterator<int32_t, RowOffsetFunctor, cub::CountingInputIterator<int32_t>>
transform_input_iter(counting_intput_it, get_row_offset);
size_t tmp_storage_bytes = 0;
auto DoReduce = [&](T* tmp_storage_ptr) {
int retcode = cub::DeviceSegmentedReduce::Reduce(
tmp_storage_ptr, tmp_storage_bytes, x.ptr(), y.ptr(), num_rows, transform_input_iter,
transform_input_iter + 1, typename CubFunctor4BianryFunc<T, binary_func>::type(),
UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
CHECK_EQ(retcode, 0) << "cub::DeviceSegmentedReduce::Reduce error";
};
DoReduce(nullptr);
CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
DoReduce(tmp_storage.ptr());
}
};
template<typename T, template<typename> class binary_func>
struct NdarrayMatrixColReduce<DeviceType::kGPU, T, binary_func> final {
static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
if (y.shape().ElemNum() > GetMaxVal<int32_t>()) { return false; }
if (x.shape().NumAxes() != 2) { return false; }
if (y.shape().NumAxes() != 2) { return false; }
return y.shape().At(0) == 1 && x.shape().At(1) == y.shape().At(1);
}
struct XY2YXFunctor final {
__host__ __device__ XY2YXFunctor(int32_t dim_x, int32_t dim_y) : dim_x_(dim_x), dim_y_(dim_y) {}
__host__ __device__ int32_t operator()(const int32_t& idx) const {
const int32_t y = idx / dim_x_;
const int32_t x = idx % dim_x_;
return x * dim_y_ + y;
}
int32_t dim_x_;
int32_t dim_y_;
};
static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
const XpuVarNdarray<T>& tmp_storage) {
CHECK(Matched(y, x));
int64_t num_rows = x.shape().At(0);
int64_t num_cols = x.shape().At(1);
if (num_cols < kNumCols4OneBlockLayer) {
return MatrixColReduce<binary_func, T>(ctx, num_rows, num_cols, x.host_ptr(), y.host_ptr(),
tmp_storage.host_ptr());
}
RowOffsetFunctor get_row_offset(num_rows);
cub::CountingInputIterator<int32_t> counting_intput_it(0);
cub::TransformInputIterator<int32_t, RowOffsetFunctor, cub::CountingInputIterator<int32_t>>
transform_input_iter(counting_intput_it, get_row_offset);
XY2YXFunctor xy2yx(x.shape().At(0), x.shape().At(1));
using XY2YxIndexIter =
cub::TransformInputIterator<int32_t, XY2YXFunctor, cub::CountingInputIterator<int32_t>>;
XY2YxIndexIter xy2yx_iter(counting_intput_it, xy2yx);
PermutationIterator<const T, const T*, XY2YxIndexIter> x_iter(x.ptr(), xy2yx_iter);
size_t tmp_storage_bytes = 0;
auto DoReduce = [&](T* tmp_storage_ptr) {
int retcode = cub::DeviceSegmentedReduce::Reduce(
tmp_storage_ptr, tmp_storage_bytes, x_iter, y.ptr(), num_cols, transform_input_iter,
transform_input_iter + 1, typename CubFunctor4BianryFunc<T, binary_func>::type(),
UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
CHECK_EQ(retcode, 0) << "cub::DeviceSegmentedReduce::Reduce error";
};
DoReduce(nullptr);
CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
DoReduce(tmp_storage.ptr());
}
};
template<typename T, template<typename> class binary_func>
struct NdarrayXYZCubeXZReduce<DeviceType::kGPU, T, binary_func> final {
static bool Matched(const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x) {
if (y.shape().ElemNum() > GetMaxVal<int32_t>()) { return false; }
if (x.shape().NumAxes() != 3) { return false; }
if (y.shape().NumAxes() != 3) { return false; }
return y.shape().At(0) == 1 && x.shape().At(1) == y.shape().At(1) && y.shape().At(2) == 1;
}
struct XYZ2YxzFunctor final {
__host__ __device__ XYZ2YxzFunctor(int32_t dim_x, int32_t dim_y, int32_t dim_z)
: dim_z_(dim_z), dim_xz_(dim_x * dim_z), dim_yz_(dim_y * dim_z) {}
__host__ __device__ int32_t operator()(const int32_t& idx) const {
const int32_t y = idx / dim_xz_;
const int32_t xz_idx = idx % dim_xz_;
const int32_t x = xz_idx / dim_z_;
const int32_t z = xz_idx % dim_z_;
return x * dim_yz_ + y * dim_z_ + z;
}
int32_t dim_z_;
int32_t dim_xz_;
int32_t dim_yz_;
};
static void Reduce(DeviceCtx* ctx, const XpuVarNdarray<T>& y, const XpuVarNdarray<const T>& x,
const XpuVarNdarray<T>& tmp_storage) {
CHECK(Matched(y, x));
int32_t num_rows = y.shape().ElemNum();
int32_t num_cols = x.shape().ElemNum() / y.shape().ElemNum();
RowOffsetFunctor get_row_offset(num_cols);
cub::CountingInputIterator<int32_t> counting_intput_it(0);
cub::TransformInputIterator<int32_t, RowOffsetFunctor, cub::CountingInputIterator<int32_t>>
transform_input_iter(counting_intput_it, get_row_offset);
XYZ2YxzFunctor xyz2yxz(x.shape().At(0), x.shape().At(1), x.shape().At(2));
using XYZ2YxzIndexIter =
cub::TransformInputIterator<int32_t, XYZ2YxzFunctor, cub::CountingInputIterator<int32_t>>;
XYZ2YxzIndexIter xyz2yxz_iter(counting_intput_it, xyz2yxz);
PermutationIterator<const T, const T*, XYZ2YxzIndexIter> x_iter(x.ptr(), xyz2yxz_iter);
size_t tmp_storage_bytes = 0;
auto DoReduce = [&](T* tmp_storage_ptr) {
int retcode = cub::DeviceSegmentedReduce::Reduce(
tmp_storage_ptr, tmp_storage_bytes, x_iter, y.ptr(), num_rows, transform_input_iter,
transform_input_iter + 1, typename CubFunctor4BianryFunc<T, binary_func>::type(),
UnitOfBinaryFunc<T, binary_func>::Val(), ctx->cuda_stream());
CHECK_EQ(retcode, 0) << "cub::DeviceSegmentedReduce::Reduce error";
};
DoReduce(nullptr);
CHECK_GE(tmp_storage.shape().ElemNum() * sizeof(T), tmp_storage_bytes);
DoReduce(tmp_storage.ptr());
}
};
namespace {
template<typename T, int NDIMS, template<typename> class binary_func>
__global__ void NdarrayReduceGpuInplaceReduceAxis(const XpuReducedNdarray<T, NDIMS> dst_reduced,
const XpuReducedNdarray<T, NDIMS> x, int axis) {
NdarrayReduceCore<T, NDIMS, binary_func>::ReduceAxis(dst_reduced, x, axis);
}
} // namespace
template<typename T, int NDIMS, template<typename> class binary_func>
struct NdarrayReduceCoreWrapper<DeviceType::kGPU, T, NDIMS, binary_func> final {
static void ReduceAxis(DeviceCtx* ctx, const XpuReducedNdarray<T, NDIMS>& dst_reduced,
const XpuReducedNdarray<T, NDIMS>& x, int axis) {
size_t n = x.host_shape().HostElemNum();
RUN_CUDA_KERNEL((NdarrayReduceGpuInplaceReduceAxis<T, NDIMS, binary_func>), ctx, n, dst_reduced,
x, axis);
}
};
#define INSTANTIATE_NDARRAY_REDUCE_IMPL(dtype, binary_func) \
template struct NdarrayScalarReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>; \
template struct NdarrayMatrixRowReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>; \
template struct NdarrayMatrixColReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>; \
template struct NdarrayXYZCubeXZReduce<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype), binary_func>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_REDUCE_IMPL,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ,
REDUCE_BINARY_FUNC_SEQ);
#define INSTANTIATE_NDARRAY_REDUCE_CORE_WRAPPER(dtype_pair, NDIMS, binary_func) \
template struct NdarrayReduceCoreWrapper<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype_pair), NDIMS, \
binary_func>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_REDUCE_CORE_WRAPPER,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, DIM_SEQ,
REDUCE_BINARY_FUNC_SEQ);
} // namespace oneflow
|
CudaUtilities.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaUtilities_hip.cuh"
namespace Helix {
void *fixedCudaMalloc(size_t len) {
void *p;
if (hipMalloc(&p, len) == hipSuccess) return p;
return 0;
}
template<typename FN>
FN *cudaAlloCopy(FN *org, const size_t size) {
void* mem = fixedCudaMalloc(size);
FN *res = (FN *)mem;
hipMemcpy(res, org, size, hipMemcpyHostToDevice);
return res;
}
template float4 *cudaAlloCopy <float4>(float4 *, const size_t);
template float3 *cudaAlloCopy <float3>(float3 *, const size_t);
template double4 *cudaAlloCopy <double4>(double4 *, const size_t);
template double3 *cudaAlloCopy <double3>(double3 *, const size_t);
}
| CudaUtilities.cu | #include "CudaUtilities.cuh"
namespace Helix {
void *fixedCudaMalloc(size_t len) {
void *p;
if (cudaMalloc(&p, len) == cudaSuccess) return p;
return 0;
}
template<typename FN>
FN *cudaAlloCopy(FN *org, const size_t size) {
void* mem = fixedCudaMalloc(size);
FN *res = (FN *)mem;
cudaMemcpy(res, org, size, cudaMemcpyHostToDevice);
return res;
}
template float4 *cudaAlloCopy <float4>(float4 *, const size_t);
template float3 *cudaAlloCopy <float3>(float3 *, const size_t);
template double4 *cudaAlloCopy <double4>(double4 *, const size_t);
template double3 *cudaAlloCopy <double3>(double3 *, const size_t);
}
|
CudaUtilities.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaUtilities_hip.cuh"
namespace Helix {
void *fixedCudaMalloc(size_t len) {
void *p;
if (hipMalloc(&p, len) == hipSuccess) return p;
return 0;
}
template<typename FN>
FN *cudaAlloCopy(FN *org, const size_t size) {
void* mem = fixedCudaMalloc(size);
FN *res = (FN *)mem;
hipMemcpy(res, org, size, hipMemcpyHostToDevice);
return res;
}
template float4 *cudaAlloCopy <float4>(float4 *, const size_t);
template float3 *cudaAlloCopy <float3>(float3 *, const size_t);
template double4 *cudaAlloCopy <double4>(double4 *, const size_t);
template double3 *cudaAlloCopy <double3>(double3 *, const size_t);
}
| CudaUtilities.cuh | /// @file CudaUtilities.cuh
///
/// Collection of functions and macros that augment the base CUDA
/// libraries.
///
/// @author Tavis Maclellan
#ifndef CUDAUTILITIES_CUH
#define CUDAUTILITIES_CUH
/// Handles error codes returned from CUDA function calls.
#define cudaCheck(call) { cudaAssert((call), #call, __FILE__, __LINE__); }
/// Releases memory allocated on the GPU device.
#define cudaFREE(ptr) if(ptr!=NULL) { cudaFree(ptr);ptr=NULL;}
/// Asserts that a CUDA error code is not fatal. If the error code is not
/// a success, the function will output the error message along with the
/// file name and line number of the error. By default the function will
/// terminate the program if there is an error; override this behavior
/// by setting the abort parameter to false.
inline void cudaAssert(const cudaError_t code, char const *const func, const char* file, const int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA Error: %s(%d): %s: %s\n", file, line, func, cudaGetErrorString(code));
if (abort)
{
cudaDeviceReset();
exit(code);
}
}
}
#endif |
d534bb24fe1f39df91b39b688d451d7b1ae2e1d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <memory>
#include <stdio.h>
#include <iostream>
#include "params.h"
#include "particle.h"
#include "pic.h"
#include <hipfft.h>
#include <hipfftXt.h>
#include <unistd.h>
#include <hiprand/hiprand_kernel.h>
//declaring variables in constant memory
__device__ __constant__ int xDimC; //size of grid in x dimension
__device__ __constant__ int yDimC; //size of grid in y dimension
__device__ __constant__ int nxC; //num grid points in x direction
__device__ __constant__ int nyC; //num grid points in y direction
__device__ __constant__ int npxC; //num particles in x direction
__device__ __constant__ int npyC; //num particles in y direction
__device__ __constant__ int npC; //num particles in y direction
__device__ __constant__ double chargeC; //charge of superparticle
__device__ __constant__ double massC; //mass of superparticle
__device__ __constant__ double dxC; //size of cell in x direction
__device__ __constant__ double dyC; //size of cell in y direction
__device__ __constant__ double cellAreaC; //area of one cell
__device__ __constant__ double qmC; //ration of charge to mass of particle
__device__ __constant__ double dtC; //time increment between iterations
#define CHECK_ERROR(err)\
if (err != hipSuccess){ \
std::cerr << "ERROR:" << hipGetErrorString(err) << '\n'; \
exit (-1); \
}
#define CHECK_LAST_ERROR(err)\
{hipError_t = hipGetLastError(); \
if (err != hipSuccess) {\
std::cerr << hipGetErrorString(err) << '\n'; \
exit(-1); \
}\
}
/**
* \brief Function to perform double-precision atomic addition in CUDA.
* Necessary to use atomic add for doubles with compute capability < 6.0
*
* \param address of value to add to
* \param value to add
*/
__device__ double atomicAddDouble(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
/**
* \brief Cuda kernel to calculate current cell of each particle on the gpu
*
* \param positionX Array containing x-coordinates of all particles
* \param positionY Array containing y-coordinates of all particles
* \param cellX Array to store current cell in x direction of each particle
* \param cellY Array to store current cell in y direction of each particle
*/
__global__ void currentCell(double* positionX, double* positionY, int* cellX, int* cellY) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
//int idy = blockDim.y * blockIdx.y + threadIdx.y;
int tid = threadIdx.x;
//loading size of particles vector from grid
int n = npxC*npyC;
//initialising shared memory and loading array into it
extern __shared__ double sdata[];
sdata[2*tid] = (idx < n) ? positionX[idx] : 0;
sdata[2*tid+1] = (idx < n) ? positionY[idx] : 0;
__syncthreads();
if (idx < n) {
cellX[idx] = (int) (positionX[idx]/dxC); //integer arithmetic
cellY[idx] = (int) (positionY[idx]/dyC);
}
}
/*
* \brief Cuda kernel to assign particle charges to the nearest grid points
*
* \param positionX Array containing x-coordinates of all particles
* \param positionY Array containing y-coordinates of all particles
* \param cellX Array containing current cell in x direction of each particle
* \param cellY Array containing current cell in y direction of each particle
* \param density Array to store the charge density of each gridpoint
*/
__global__ void chargeAssignment(double* positionX, double* positionY, int* cellX, int* cellY, double* density) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
double invArea = 1.0/cellAreaC;
int n = npxC*npyC;
int y = nyC;
extern __shared__ double sdata[];
sdata[4*tid] = (idx < n) ? positionX[idx] : 0;
sdata[4*tid+1] = (idx < n) ? positionY[idx] : 0;
sdata[4*tid+2] = (idx < n) ? cellX[idx] : 0;
sdata[4*tid+3] = (idx < n) ? cellY[idx] : 0;
__syncthreads();
//adding to charge density of nearby points
if (idx < n) {
int xCell = cellX[idx];
int yCell = cellY[idx];
double area1 = ((positionX[idx] - cellX[idx]*dxC) * (positionY[idx] - cellY[idx]*dyC))*invArea;
double area2 = (((cellX[idx]*dxC + dxC) - positionX[idx]) * (positionY[idx] - cellY[idx]*dyC))*invArea;
double area3 = ((positionX[idx] - cellX[idx]*dxC) * ((cellY[idx]*dyC + dyC) - positionY[idx]))*invArea;
double area4 = (((cellX[idx]*dxC + dxC) - positionX[idx]) * ((cellY[idx]*dyC + dyC) - positionY[idx]))*invArea;
//way with integer logic
density[xCell*y + yCell] += (area1 * chargeC);
density[((xCell+1)%nxC)*y + yCell] += (area2 * chargeC);
density[(xCell)*y + ((yCell+1)%nyC)] += (area3 * chargeC);
density[((xCell+1)%nxC)*y + ((yCell+1)%nyC)] += (area4 * chargeC);
//using atomic add
/*
atomicAddDouble(&density[xCell*y + yCell], area1*chargeC);
atomicAddDouble(&density[((xCell+1)%nxC)*y + yCell], area2*chargeC);
atomicAddDouble(&density[xCell*y + ((yCell+1)%nyC)], area3*chargeC);
atomicAddDouble(&density[((xCell+1)%nxC)*y + ((yCell+1)%nyC)], area4*chargeC);
*/
__syncthreads();
}
}
/**
* \brief Kernel to solve the Poisson equation in Fourier space
*
* \param arr Array containing output of forward R2C Fourier transform
* \param nyh Physical y-dimension of Fourier transform output (not logical size due to Hermitian symmetry)
*/
__global__ void fftPoissonSolver(hipfftDoubleComplex* arr, const int nyh) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
int n = nxC*nyh;
//initialising shared memory and loading array into it
extern __shared__ double sdata[];
sdata[2*tid] = (idx < n) ? arr[idx].x : 0;
sdata[2*tid+1] = (idx < n) ? arr[idx].y : 0;
__syncthreads();
double pi = 3.141592654f;
int i, j;
int II, JJ;
double k1, k2;
if (idx < n) {
i = idx/nyh;
j = idx%nyh;
//setting II and JJ
if (2*i < nxC){
II = i;
} else {
II = i - nxC;
}
if (2*j < nyh) {
JJ = j;
} else {
JJ = j - nyh;
}
//setting wavevector
k1 = 2*pi*II;
k2 = 2*pi*JJ;
double fact = k1*k1 + k2*k2;
double invFact = __fdividef(-1.0, fact);
if (fabsf(fact) < 1e-14) {
arr[idx].x = 0.0;
arr[idx].y = 0.0;
} else {
arr[idx].x *= invFact;
arr[idx].y *= invFact;
}
}
}
/**
* \brief Simple kernel to copy memory from device array to device array
* Avoids latency of devicetodevice memcpy API calls
*
* \param arr array containing inverse FFT output
*/
__global__ void copyD2D(double *dest, double *src) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int n = nxC*nyC;
if (idx < n) {
dest[idx] = src[idx];
}
}
/**
* \brief Kernel to normalise the output of the Fourier transform
*
* \param arr array containing inverse FFT output
*/
__global__ void normaliseTransform(double *arr) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
int n = nxC*nyC;
//initialising shared memory and loading array into it
extern __shared__ double sdata[];
sdata[tid] = (idx < n) ? arr[idx] : 0;
__syncthreads();
if (idx < n) {
double norm = __fdividef(-1.0, n);
arr[idx] *= norm;
}
}
/**
* \brief Kernel to compute the electric field given the electric potential
*
* \param fieldX double array to store the values of the electric field in x-direction
* \param fieldY double array to store the values of the electric field in y-direction
* \param potential double array in which the electric potential is stored
*/
__global__ void computeElectricField(double* fieldX, double* fieldY, double* potential) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
int n = nxC*nyC;
//initialising shared memory and loading array into it
extern __shared__ double sdata[];
sdata[tid] = (idx < n) ? potential[idx] : 0;
__syncthreads();
double divisorX = 2*dxC;
double divisorY = 2*dyC;
if (idx < n) {
//double i = idx%nyC;
//double j = idx/nyC;
double i = idx/nxC;
double j = idx%nyC;
//setting x component of electric field
if (i==0){
fieldX[idx] = (potential[idx+(nxC-1)]-potential[idx+1])*divisorX;
}
else if (i==(nxC-1)) {
fieldX[idx] = (potential[idx-1]-potential[idx-(nxC-1)])*divisorX;
}
else {
fieldX[idx] = (potential[idx-1]-potential[idx+1])*divisorX;
}
//setting y component of electric field
if (j==0) {
fieldY[idx] = (potential[idx+nyC*(nxC-1)]-potential[idx+nyC])*divisorY;
}
else if (j==(nyC-1)) {
fieldY[idx] = (potential[idx-nyC]-potential[idx-nyC*(nxC-1)])*divisorY;
}
else {
fieldY[idx] = (potential[idx-nyC]-potential[idx+nyC])*divisorY;
}
}
}
/**
 * \brief Kernel to compute the force acting on each particle.
 *
 * Gathers the electric field at each particle by bilinear (cloud-in-cell)
 * weighting over the four grid points of the cell the particle sits in,
 * wrapping periodically at the upper grid edges. Field arrays use the layout
 * field[i*nyC + j] (x index i, stride nyC), the same layout chargeAssignment
 * deposits into. The stored result is qmC * E, i.e. charge/mass times the
 * field — an acceleration — which positionUpdate integrates directly.
 *
 * Launch: one thread per particle, 1-D grid rounded up; the host passes
 * 4*blockDim.x doubles of dynamic shared memory.
 *
 * NOTE(review): the shared-memory staging below is dead code — sdata is
 * written and synchronised but never read; all operands are re-loaded from
 * global memory. Harmless but wasted bandwidth.
 *
 * NOTE(review): each corner is weighted by the sub-area adjacent to it rather
 * than the opposite one; chargeAssignment uses the same convention, so the
 * scheme is internally consistent — confirm against the intended CIC scheme.
 *
 * \param positionX double array containing x-coordinate of each particle
 * \param positionY double array containing y-coordinate of each particle
 * \param cellX int array containing current cell in x-direction of each particle
 * \param cellY int array containing current cell in y-direction of each particle
 * \param fieldX double array containing x-component of electric field at each grid point
 * \param fieldY double array containing y-component of electric field at each grid point
 * \param forceX double array to store x-component of (charge/mass-scaled) force on each particle
 * \param forceY double array to store y-component of (charge/mass-scaled) force on each particle
 */
__global__ void forceCalculation(double* positionX, double* positionY, int* cellX, int* cellY, double* fieldX, double* fieldY, double* forceX, double* forceY) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
double invArea = 1.0/cellAreaC; //reciprocal of cell area normalises the bilinear weights
int n = npxC*npyC; //total particle count
int y = nyC; //row stride of the field arrays
//staging into dynamic shared memory (unused — see NOTE above)
extern __shared__ double sdata[];
sdata[4*tid] = (idx < n) ? positionX[idx] : 0;
sdata[4*tid+1] = (idx < n) ? positionY[idx] : 0;
sdata[4*tid+2] = (idx < n) ? cellX[idx] : 0;
sdata[4*tid+3] = (idx < n) ? cellY[idx] : 0;
__syncthreads();
//computing force acting on each particle
if (idx < n) {
double tmp = 0.0;
int xCell = cellX[idx];
int yCell = cellY[idx];
//bilinear weights: fraction of the cell area in each quadrant around the particle
double area1 = ((positionX[idx] - xCell*dxC) * (positionY[idx] - yCell*dyC))*invArea;
double area2 = (((xCell*dxC + dxC) - positionX[idx]) * (positionY[idx] - yCell*dyC))*invArea;
double area3 = ((positionX[idx] - xCell*dxC) * ((yCell*dyC + dyC) - positionY[idx]))*invArea;
double area4 = (((xCell*dxC + dxC) - positionX[idx]) * ((yCell*dyC + dyC) - positionY[idx]))*invArea;
//x component: gather from the four cell corners, wrapping with % at the edges
forceX[idx] = 0.0;
tmp += area1 * fieldX[xCell*y + yCell];
tmp += area2 * fieldX[((xCell+1)%nxC)*y + yCell];
tmp += area3 * fieldX[xCell*y + ((yCell+1)%y)];
tmp += area4 * fieldX[((xCell+1)%nxC)*y + (yCell+1)%y];
forceX[idx] = tmp*qmC;
//y component: same gather on the y field
forceY[idx] = 0.0;
tmp = 0.0;
tmp += area1 * fieldY[xCell*y + yCell];
tmp += area2 * fieldY[((xCell+1)%nxC)*y + yCell];
tmp += area3 * fieldY[xCell*y + (yCell+1)%y];
tmp += area4 * fieldY[((xCell+1)%nxC)*y + (yCell+1)%y];
forceY[idx] = tmp*qmC;
}
}
/**
 * \brief Kernel to update the particle positions given the force acting on them.
 *
 * Velocity-Verlet (leapfrog) step: half-kick with the previous step's
 * acceleration, drift, periodic wrap, then a second half-kick with the
 * freshly gathered acceleration. forceX/forceY already carry the q/m factor
 * (see forceCalculation), so they are used directly as accelerations.
 *
 * Fix: the periodic wrap now uses '>=' — with the previous '>' a coordinate
 * exactly equal to xDimC/yDimC survived unwrapped and mapped to cell index
 * nxC/nyC, one past the end of every grid array. The wrap assumes a particle
 * moves less than one domain length per step.
 *
 * The unused shared-memory staging of the original was removed (it was
 * written and never read).
 *
 * \param positionX double array containing x-component of position
 * \param positionY double array containing y-component of position
 * \param velocityX double array containing x-component of velocity
 * \param velocityY double array containing y-component of velocity
 * \param forceX double array containing x-component of force (q/m-scaled)
 * \param forceY double array containing y-component of force (q/m-scaled)
 * \param forceOldX double array containing x-component of force at previous time-step
 * \param forceOldY double array containing y-component of force at previous time-step
 * \param cellX int array containing current cell in x-direction of particle
 * \param cellY int array containing current cell in y-direction of particle
 */
__global__ void positionUpdate(double* positionX, double* positionY, double* velocityX, double* velocityY, double* forceX, double* forceY, double* forceOldX, double* forceOldY, int* cellX, int* cellY) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = npC;
    if (idx < n) {
        //half-kick using the acceleration from the previous step
        double vxh = velocityX[idx] + 0.5*dtC*forceOldX[idx];
        double vyh = velocityY[idx] + 0.5*dtC*forceOldY[idx];
        //drift
        positionX[idx] += vxh*dtC;
        positionY[idx] += vyh*dtC;
        //periodic wrap; '>=' keeps positions strictly inside [0, dim)
        if (positionX[idx] >= xDimC) {positionX[idx] -= xDimC;}
        else if (positionX[idx] < 0) {positionX[idx] += xDimC;}
        if (positionY[idx] >= yDimC) {positionY[idx] -= yDimC;}
        else if (positionY[idx] < 0) {positionY[idx] += yDimC;}
        //second half-kick with the current acceleration
        velocityX[idx] = vxh + 0.5*dtC*forceX[idx];
        velocityY[idx] = vyh + 0.5*dtC*forceY[idx];
        //keep the cached cell indices in sync with the new position
        cellX[idx] = (int)(positionX[idx]/dxC);
        cellY[idx] = (int)(positionY[idx]/dyC);
    }
}
/**
 * \brief Kernel to initialise the per-thread hipRAND RNG states.
 *
 * Fixed seed, one independent sequence per thread, zero offset, so runs are
 * reproducible. Requires the npC constant to be uploaded first (picFloatGpu
 * does this before any kernel launch).
 *
 * Fix: added a bounds guard. The launch grid is rounded up to a whole number
 * of blocks while the state array holds exactly npC entries, so tail threads
 * previously wrote state[idx] out of bounds.
 *
 * \param state random number generator state array (npC entries)
 */
__global__ void initialiseGenerator(hiprandState_t *state) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    //guard the rounded-up grid tail: state[] has only npC entries
    if (idx < npC) {
        //same seed for all threads, distinct sequence per thread, no offset
        hiprand_init(1234, idx, 0, &state[idx]);
    }
}
/**
 * \brief Kernel to simulate Monte-Carlo collisions between electrons.
 *
 * Each particle collides with fixed probability p per timestep; on collision
 * its velocity components are redrawn from an approximate Maxwellian built as
 * a scaled sum of three uniform deviates (central-limit approximation).
 *
 * Fix: the advanced RNG state is now written back to global memory. The
 * original used a local copy of state[idx] but never stored it back, so every
 * invocation of this kernel replayed the identical random sequence.
 *
 * The unused shared-memory staging of the original was removed (written,
 * never read).
 *
 * \param state random number generator state array (one entry per particle)
 * \param velocityX array containing x-coordinate of particle velocity
 * \param velocityY array containing y-coordinate of particle velocity
 */
__global__ void collisions(hiprandState_t *state, double* velocityX, double* velocityY) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = npC;
    if (idx < n) {
        //fixed collision probability per particle per timestep
        double p = 0.22;
        //work on a register copy of the RNG state for speed
        hiprandState_t localState = state[idx];
        double r = hiprand_uniform_double(&localState);
        //if probability > random number, collision occurs
        if (p > r) {
            //redraw both velocity components from the approximate Maxwellian
            double max1 = 2*(hiprand_uniform_double(&localState) + hiprand_uniform_double(&localState) + hiprand_uniform_double(&localState) - 1.5);
            double max2 = 2*(hiprand_uniform_double(&localState) + hiprand_uniform_double(&localState) + hiprand_uniform_double(&localState) - 1.5);
            velocityX[idx] = max1;
            velocityY[idx] = max2;
        }
        //persist the advanced state so the next call continues the sequence
        state[idx] = localState;
    }
}
/**
 * \brief Host driver for the PIC simulation on the GPU.
 *
 * Uploads the simulation constants and initial particle state, then runs the
 * time loop: charge deposition -> FFT Poisson solve -> E-field computation ->
 * force gather -> velocity-Verlet push -> Monte-Carlo collisions. The final
 * particle positions are copied into xResult/yResult.
 *
 * Fixes relative to the original:
 *  - chargeDensityGpu is zeroed every step (chargeAssignment accumulates with
 *    +=, so charge previously grew without bound across steps);
 *  - forceXOldGpu/forceYOldGpu are zero-initialised (they are read by the
 *    first positionUpdate before any force exists);
 *  - the force -> forceOld copies use device-to-device memcpys of the full
 *    particle count on stream[0] (copyD2D only copies nx*ny elements, which
 *    truncated the np-sized arrays, and the stream[1] launch raced the push);
 *  - the redundant hipfftCreate calls are gone (hipfftPlan2d allocates its
 *    own handle, so they only leaked), and plans, events, the FFT scratch
 *    buffer and the RNG states are now released.
 *
 * \param positionX,positionY host arrays with the initial particle positions
 * \param velocityX,velocityY host arrays with the initial particle velocities
 * \param xResult,yResult     host arrays that receive the final positions
 * \param p                   shared simulation parameter set
 * \return elapsed GPU wall-clock time in seconds
 */
double picFloatGpu(double* positionX, double *positionY, double* velocityX, double* velocityY, double *xResult, double *yResult, std::shared_ptr<Params> p) {
    hipSetDevice(0);
    //creating streams so independent uploads/copies can overlap
    hipStream_t stream[2];
    for (int i=0; i<2; i++) {
        hipStreamCreate(&stream[i]);
    }
    printf("starting on gpu\n");
    //initialise required variables for time measurement
    hipEvent_t start, finish;
    float elapsedTime;
    hipEventCreate(&start);
    hipEventCreate(&finish);
    hipEventRecord(start, 0);
    //host-side copies of the values destined for constant memory
    const int xDim = p->xDim;
    const int yDim = p->yDim;
    const int nx = p->nx;
    const int ny = p->ny;
    const int npx = p->numParticlesX;
    const int npy = p->numParticlesY;
    const int np = npx*npy;
    const double charge = p->electronCharge;
    const double mass = p->mass;
    const double dx = p->dx;
    const double dy = p->dy;
    const double area = p->area;
    const double qm = p->electronCharge/p->mass;
    const double dt = p->dt;
    //grid configuration for particle kernels (one thread per particle, rounded up)
    dim3 dimBlock(p->blockSize);
    dim3 dimGridParticles((np/dimBlock.x)+(!(np%dimBlock.x)?0:1));
    //grid configuration for grid kernels (one thread per grid point, rounded up)
    int ng = nx*ny;
    dim3 dimGridCells((ng/dimBlock.x)+(!(ng%dimBlock.x)?0:1));
    //dynamic shared memory sizes (1, 2 and 4 doubles per thread)
    int smem1 = dimBlock.x*sizeof(double);
    int smem2 = dimBlock.x*2*sizeof(double);
    int smem4 = dimBlock.x*4*sizeof(double);
    //copying data to constant memory
    hipMemcpyToSymbolAsync(xDimC, &xDim, sizeof(int), 0, hipMemcpyHostToDevice, stream[0]);
    hipMemcpyToSymbolAsync(yDimC, &yDim, sizeof(int), 0, hipMemcpyHostToDevice, stream[1]);
    hipMemcpyToSymbolAsync(nxC, &nx, sizeof(int), 0, hipMemcpyHostToDevice, stream[0]);
    hipMemcpyToSymbolAsync(nyC, &ny, sizeof(int), 0, hipMemcpyHostToDevice, stream[1]);
    hipMemcpyToSymbolAsync(npxC, &npx, sizeof(int), 0, hipMemcpyHostToDevice, stream[0]);
    hipMemcpyToSymbolAsync(npyC, &npy, sizeof(int), 0, hipMemcpyHostToDevice, stream[1]);
    hipMemcpyToSymbolAsync(npC, &np, sizeof(int), 0, hipMemcpyHostToDevice, stream[0]);
    hipMemcpyToSymbolAsync(chargeC, &charge, sizeof(double), 0, hipMemcpyHostToDevice, stream[1]);
    hipMemcpyToSymbolAsync(massC, &mass, sizeof(double), 0, hipMemcpyHostToDevice, stream[0]);
    hipMemcpyToSymbolAsync(dxC, &dx, sizeof(double), 0, hipMemcpyHostToDevice, stream[1]);
    hipMemcpyToSymbolAsync(dyC, &dy, sizeof(double), 0, hipMemcpyHostToDevice, stream[0]);
    hipMemcpyToSymbolAsync(cellAreaC, &area, sizeof(double), 0, hipMemcpyHostToDevice, stream[1]);
    hipMemcpyToSymbolAsync(qmC, &qm, sizeof(double), 0, hipMemcpyHostToDevice, stream[0]);
    hipMemcpyToSymbolAsync(dtC, &dt, sizeof(double), 0, hipMemcpyHostToDevice, stream[1]);
    //pointers to device memory
    double *positionXGpu, *positionYGpu;
    int *currentCellXGpu, *currentCellYGpu;
    double *velocityXGpu, *velocityYGpu;
    double *forceXGpu, *forceYGpu;
    double *forceXOldGpu, *forceYOldGpu;
    double *chargeDensityGpu;
    double *electricPotentialGpu;
    double *electricFieldXGpu, *electricFieldYGpu;
    //allocating device memory for data arrays
    hipMalloc((void**) &positionXGpu, np*sizeof(double));        //particle positions
    hipMalloc((void**) &positionYGpu, np*sizeof(double));
    hipMalloc((void**) &velocityXGpu, np*sizeof(double));        //particle velocities
    hipMalloc((void**) &velocityYGpu, np*sizeof(double));
    hipMalloc((void**) &currentCellXGpu, np*sizeof(int));        //current cell of each particle
    hipMalloc((void**) &currentCellYGpu, np*sizeof(int));
    hipMalloc((void**) &forceXGpu, np*sizeof(double));           //force acting on particles
    hipMalloc((void**) &forceYGpu, np*sizeof(double));
    hipMalloc((void**) &forceXOldGpu, np*sizeof(double));        //force at the previous timestep
    hipMalloc((void**) &forceYOldGpu, np*sizeof(double));
    hipMalloc((void**) &chargeDensityGpu, nx*ny*sizeof(double)); //charge density on the grid
    hipMalloc((void**) &electricPotentialGpu, nx*ny*sizeof(double));
    hipMalloc((void**) &electricFieldXGpu, nx*ny*sizeof(double));
    hipMalloc((void**) &electricFieldYGpu, nx*ny*sizeof(double));
    //the first positionUpdate reads forceOld before any force has been
    //computed: zero it so the first half-kick is well defined
    hipMemsetAsync(forceXOldGpu, 0, np*sizeof(double), stream[0]);
    hipMemsetAsync(forceYOldGpu, 0, np*sizeof(double), stream[1]);
    //copying particle data from host
    hipMemcpyAsync(positionXGpu, positionX, np*sizeof(double), hipMemcpyHostToDevice, stream[0]);
    hipMemcpyAsync(positionYGpu, positionY, np*sizeof(double), hipMemcpyHostToDevice, stream[1]);
    hipMemcpyAsync(velocityXGpu, velocityX, np*sizeof(double), hipMemcpyHostToDevice, stream[0]);
    hipMemcpyAsync(velocityYGpu, velocityY, np*sizeof(double), hipMemcpyHostToDevice, stream[1]);
    //creating FFT plans (hipfftPlan2d allocates the handle itself)
    hipfftHandle planForward, planInverse;
    hipfftPlan2d(&planForward, nx, ny, HIPFFT_D2Z);
    hipfftPlan2d(&planInverse, nx, ny, HIPFFT_Z2D);
    //scratch buffer for the R2C output (last dimension shrinks to ny/2+1)
    const int nyh = ny/2 + 1;
    double *in;
    hipfftDoubleComplex *out;
    hipMalloc((void**) &out, nx*nyh*sizeof(hipfftDoubleComplex));
    //per-particle RNG states for the collision kernel
    hiprandState_t *states;
    hipMalloc((void **)&states, np*sizeof(hiprandState_t));
    //initialise hiprand generator
    hipLaunchKernelGGL(( initialiseGenerator), dim3(dimGridParticles),dim3(dimBlock),smem2,stream[0], states);
    //starting iteration
    double t = 0;
    printf("Starting iteration on GPU.\n");
    while (t < p->tmax) {
        //chargeAssignment accumulates with +=, so clear the density each step
        hipMemsetAsync(chargeDensityGpu, 0, ng*sizeof(double), stream[0]);
        //setting current cells of each particle
        hipLaunchKernelGGL(( currentCell), dim3(dimGridParticles),dim3(dimBlock),smem2,stream[0], positionXGpu, positionYGpu, currentCellXGpu, currentCellYGpu);
        //interpolating charge to gridpoints
        hipLaunchKernelGGL(( chargeAssignment), dim3(dimGridParticles),dim3(dimBlock),smem4,stream[0], positionXGpu, positionYGpu, currentCellXGpu, currentCellYGpu, chargeDensityGpu);
        //forward R2C transform
        in = chargeDensityGpu;
        hipfftExecD2Z(planForward, in, out);
        //solving the Poisson equation in Fourier space
        hipLaunchKernelGGL(( fftPoissonSolver), dim3(dimGridCells),dim3(dimBlock),smem2,stream[0], out, nyh);
        //inverse C2R transform and normalisation
        hipfftExecZ2D(planInverse, out, in);
        hipLaunchKernelGGL(( normaliseTransform), dim3(dimGridCells), dim3(dimBlock), smem2, stream[0], in);
        //copy FFT Poisson output to electric potential array
        hipLaunchKernelGGL(( copyD2D), dim3(dimGridCells),dim3(dimBlock),0,stream[0], electricPotentialGpu, in);
        //computing electric field from electric potential
        hipLaunchKernelGGL(( computeElectricField), dim3(dimGridCells),dim3(dimBlock),smem1,stream[0], electricFieldXGpu, electricFieldYGpu, electricPotentialGpu);
        //computing force on each particle
        hipLaunchKernelGGL(( forceCalculation), dim3(dimGridParticles),dim3(dimBlock),smem4,stream[0], positionXGpu, positionYGpu, currentCellXGpu, currentCellYGpu, electricFieldXGpu, electricFieldYGpu, forceXGpu, forceYGpu);
        //updating particle positions
        hipLaunchKernelGGL(( positionUpdate), dim3(dimGridParticles),dim3(dimBlock),smem4,stream[0],
        positionXGpu, positionYGpu, velocityXGpu, velocityYGpu, forceXGpu, forceYGpu, forceXOldGpu, forceYOldGpu, currentCellXGpu, currentCellYGpu);
        //save this step's force for the next half-kick; full np-sized copies
        //on stream[0] so they are ordered behind positionUpdate
        hipMemcpyAsync(forceXOldGpu, forceXGpu, np*sizeof(double), hipMemcpyDeviceToDevice, stream[0]);
        hipMemcpyAsync(forceYOldGpu, forceYGpu, np*sizeof(double), hipMemcpyDeviceToDevice, stream[0]);
        //simulate collisions
        hipLaunchKernelGGL(( collisions), dim3(dimGridParticles),dim3(dimBlock),smem2,stream[0], states,velocityXGpu, velocityYGpu);
        t += dt;
    }
    //copying results back to the host
    hipMemcpyAsync(xResult, positionXGpu, np*sizeof(double), hipMemcpyDeviceToHost, stream[0]);
    hipMemcpyAsync(yResult, positionYGpu, np*sizeof(double), hipMemcpyDeviceToHost, stream[1]);
    //make sure the result copies have completed before the host uses them
    hipStreamSynchronize(stream[0]);
    hipStreamSynchronize(stream[1]);
    //stop timer
    hipEventRecord(finish, 0);
    hipEventSynchronize(finish);
    hipEventElapsedTime(&elapsedTime, start, finish);
    //destroying streams, events and FFT plans
    for (int i=0; i<2; i++) {
        hipStreamDestroy(stream[i]);
    }
    hipEventDestroy(start);
    hipEventDestroy(finish);
    hipfftDestroy(planForward);
    hipfftDestroy(planInverse);
    //freeing dynamically allocated memory
    hipFree(positionXGpu), hipFree(positionYGpu);
    hipFree(velocityXGpu), hipFree(velocityYGpu);
    hipFree(currentCellXGpu), hipFree(currentCellYGpu);
    hipFree(forceXGpu), hipFree(forceYGpu);
    hipFree(forceXOldGpu), hipFree(forceYOldGpu);
    hipFree(chargeDensityGpu);
    hipFree(electricPotentialGpu);
    hipFree(electricFieldXGpu), hipFree(electricFieldYGpu);
    hipFree(out), hipFree(states);
    printf("finished on gpu\n");
    //return elapsed time in seconds
    return (elapsedTime/1000);
}
| d534bb24fe1f39df91b39b688d451d7b1ae2e1d2.cu | #include <memory>
#include <stdio.h>
#include <iostream>
#include "params.h"
#include "particle.h"
#include "pic.h"
#include <cufft.h>
#include <cufftXt.h>
#include <unistd.h>
#include <curand_kernel.h>
//simulation parameters cached in constant memory; the host fills these with
//cudaMemcpyToSymbolAsync at the start of picFloatGpu, before any kernel runs
__device__ __constant__ int xDimC; //size of grid in x dimension
__device__ __constant__ int yDimC; //size of grid in y dimension
__device__ __constant__ int nxC; //num grid points in x direction
__device__ __constant__ int nyC; //num grid points in y direction
__device__ __constant__ int npxC; //num particles in x direction
__device__ __constant__ int npyC; //num particles in y direction
__device__ __constant__ int npC; //total number of particles (npxC*npyC)
__device__ __constant__ double chargeC; //charge of superparticle
__device__ __constant__ double massC; //mass of superparticle
__device__ __constant__ double dxC; //size of cell in x direction
__device__ __constant__ double dyC; //size of cell in y direction
__device__ __constant__ double cellAreaC; //area of one cell (set from p->area)
__device__ __constant__ double qmC; //ratio of charge to mass of particle
__device__ __constant__ double dtC; //time increment between iterations
// Abort with a readable message if a CUDA runtime call failed.
// Usage: CHECK_ERROR(cudaMalloc(...));
// Fix: the argument is evaluated exactly once (the old macro expanded it
// twice, re-executing the runtime call), and the do/while(0) wrapper makes
// the macro safe inside unbraced if/else.
#define CHECK_ERROR(err)\
do { \
cudaError_t err_ = (err); \
if (err_ != cudaSuccess){ \
std::cerr << "ERROR:" << cudaGetErrorString(err_) << '\n'; \
exit (-1); \
} \
} while (0)
// Check the most recent asynchronous error, e.g. right after a kernel launch.
// The parameter names the local variable used inside the expansion.
// Fix: the old body read `cudaError_t = cudaGetLastError();` — the variable
// name was missing, so any use of the macro failed to compile.
#define CHECK_LAST_ERROR(err)\
do { \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) {\
std::cerr << cudaGetErrorString(err) << '\n'; \
exit(-1); \
}\
} while (0)
/**
 * \brief Double-precision atomic add implemented with atomicCAS, for devices
 * with compute capability < 6.0 (which lack native atomicAdd(double*, double)).
 *
 * This is the canonical loop from the CUDA C Programming Guide: the 64-bit
 * word is reinterpreted as an unsigned long long and the compare-and-swap is
 * retried until no other thread modified the word between our read and our
 * write.
 *
 * \param address device address of the accumulator
 * \param val value to add
 * \return the value stored at address before this thread's addition
 */
__device__ double atomicAddDouble(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
//the swap succeeds only if nobody changed the word since we last read it;
//either way atomicCAS returns the current value, which feeds the next retry
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
/**
 * \brief Cuda kernel to calculate the current cell of each particle.
 *
 * Maps each coordinate to a cell index by truncating division; positions are
 * kept inside [0, xDim) x [0, yDim) by positionUpdate, so the results lie in
 * [0, nx) x [0, ny).
 *
 * Fix: removed the shared-memory staging of the original — sdata was written
 * and synchronised but never read, so it only cost bandwidth and a barrier.
 *
 * \param positionX Array containing x-coordinates of all particles
 * \param positionY Array containing y-coordinates of all particles
 * \param cellX Array to store current cell in x direction of each particle
 * \param cellY Array to store current cell in y direction of each particle
 */
__global__ void currentCell(double* positionX, double* positionY, int* cellX, int* cellY) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = npxC*npyC; //total number of particles
    if (idx < n) {
        //truncating division maps a coordinate to its cell index
        cellX[idx] = (int) (positionX[idx]/dxC);
        cellY[idx] = (int) (positionY[idx]/dyC);
    }
}
/*
 * \brief Cuda kernel to assign particle charges to the nearest grid points
 * using bilinear (cloud-in-cell) weights, wrapping periodically at the upper
 * grid edges. The density array is laid out as density[i*nyC + j].
 *
 * Fix: the scatter now uses atomicAddDouble (defined above). The plain `+=`
 * of the original is a data race — many particles deposit into the same grid
 * point concurrently and updates were lost. The trailing __syncthreads()
 * inside the divergent `if (idx < n)` branch was also removed (a barrier
 * inside divergent control flow is undefined, and nothing needed it), as was
 * the unused shared-memory staging.
 *
 * NOTE(review): each corner receives the weight of the sub-area adjacent to
 * it rather than the opposite one; forceCalculation gathers with the same
 * convention, so the pair is internally consistent — confirm against the
 * intended CIC scheme.
 *
 * \param positionX Array containing x-coordinates of all particles
 * \param positionY Array containing y-coordinates of all particles
 * \param cellX Array containing current cell in x direction of each particle
 * \param cellY Array containing current cell in y direction of each particle
 * \param density Array to store the charge density of each gridpoint
 */
__global__ void chargeAssignment(double* positionX, double* positionY, int* cellX, int* cellY, double* density) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    double invArea = 1.0/cellAreaC;
    int n = npxC*npyC;
    int y = nyC; //row stride of the density array
    if (idx < n) {
        int xCell = cellX[idx];
        int yCell = cellY[idx];
        //bilinear weights: fraction of the cell area in each quadrant around the particle
        double area1 = ((positionX[idx] - xCell*dxC) * (positionY[idx] - yCell*dyC))*invArea;
        double area2 = (((xCell*dxC + dxC) - positionX[idx]) * (positionY[idx] - yCell*dyC))*invArea;
        double area3 = ((positionX[idx] - xCell*dxC) * ((yCell*dyC + dyC) - positionY[idx]))*invArea;
        double area4 = (((xCell*dxC + dxC) - positionX[idx]) * ((yCell*dyC + dyC) - positionY[idx]))*invArea;
        //atomic scatter to the four cell corners, wrapping with % at the edges
        atomicAddDouble(&density[xCell*y + yCell], area1*chargeC);
        atomicAddDouble(&density[((xCell+1)%nxC)*y + yCell], area2*chargeC);
        atomicAddDouble(&density[xCell*y + ((yCell+1)%nyC)], area3*chargeC);
        atomicAddDouble(&density[((xCell+1)%nxC)*y + ((yCell+1)%nyC)], area4*chargeC);
    }
}
/**
 * \brief Kernel to solve the Poisson equation in Fourier space:
 * phi_hat(k) = -rho_hat(k) / |k|^2, with the zero mode pinned to 0.
 *
 * Operates in place on the R2C output, which has nxC rows and nyh = ny/2+1
 * complex columns (Hermitian symmetry halves the last dimension).
 *
 * Fixes: all arithmetic is now genuinely double precision — the original used
 * the single-precision intrinsics __fdividef/fabsf and a float pi literal on
 * double data. The y-frequency is simply the column index j (the R2C layout
 * keeps only frequencies 0..ny/2); the original branched on nyh and produced
 * spurious negative values. The unused shared-memory staging was removed.
 *
 * NOTE(review): the wavevector 2*pi*index carries no 1/L factor, i.e. a
 * unit-length periodic domain is assumed — confirm against xDim/yDim.
 *
 * \param arr Array containing output of forward R2C Fourier transform
 * \param nyh Physical y-dimension of the transform output (ny/2 + 1)
 */
__global__ void fftPoissonSolver(cufftDoubleComplex* arr, const int nyh) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = nxC*nyh;
    const double pi = 3.14159265358979323846;
    if (idx < n) {
        int i = idx/nyh; //x (row) index
        int j = idx%nyh; //y (column) index, 0..ny/2
        //map the row index to a signed frequency (upper half is negative)
        int II = (2*i < nxC) ? i : i - nxC;
        //retained y frequencies are all non-negative: the column index itself
        int JJ = j;
        //wavevector components
        double k1 = 2.0*pi*II;
        double k2 = 2.0*pi*JJ;
        double fact = k1*k1 + k2*k2;
        if (fabs(fact) < 1e-14) {
            //zero mode: the mean of the potential is arbitrary, pin it to 0
            arr[idx].x = 0.0;
            arr[idx].y = 0.0;
        } else {
            double invFact = -1.0/fact;
            arr[idx].x *= invFact;
            arr[idx].y *= invFact;
        }
    }
}
/**
 * \brief Device-to-device copy of one grid-sized array (exactly nxC*nyC
 * doubles). Launched instead of a cudaMemcpyDeviceToDevice API call to avoid
 * per-call latency inside the time loop.
 *
 * \param dest destination device array (at least nxC*nyC elements)
 * \param src  source device array (at least nxC*nyC elements)
 */
__global__ void copyD2D(double *dest, double *src) {
    const int total = nxC * nyC;
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= total) {
        return; //rounded-up grid tail does nothing
    }
    dest[gid] = src[gid];
}
/**
 * \brief Kernel to normalise the output of the inverse Fourier transform.
 *
 * cuFFT transforms are unnormalised: a forward+inverse round trip scales the
 * data by n = nxC*nyC, so each element is multiplied by 1/n here. The factor
 * also carries a sign of -1; together with the -1/k^2 in fftPoissonSolver
 * this fixes the overall sign convention of the potential — NOTE(review):
 * confirm the intended sign.
 *
 * Fix: the scale is computed with a true double division — the original used
 * __fdividef, a single-precision intrinsic, on double data. The unused
 * shared-memory staging was also removed.
 *
 * \param arr array containing inverse FFT output (nxC*nyC doubles)
 */
__global__ void normaliseTransform(double *arr) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = nxC*nyC;
    if (idx < n) {
        double norm = -1.0 / n;
        arr[idx] *= norm;
    }
}
/**
 * \brief Kernel to compute E = -grad(phi) with periodic central differences.
 *
 * Arrays use the layout established by the rest of the file (see
 * chargeAssignment/forceCalculation, which index density/field as
 * [i*nyC + j]): idx = i*nyC + j with x index i and y index j. Neighbours in x
 * therefore differ by nyC and neighbours in y by 1, wrapping periodically at
 * the domain edges.
 *
 * Fixes: the central difference now divides by 2*dx (resp. 2*dy) — the
 * original multiplied by 2*dx, which is dimensionally wrong — and the index
 * decomposition/neighbour strides were made consistent with the i*nyC + j
 * layout (the original mixed idx/nxC with +-1 x-neighbours, which only agreed
 * with the rest of the file for square grids, and swapped the x/y strides).
 * The sign convention (phi[prev] - phi[next]) of the original is preserved.
 *
 * \param fieldX double array to store the values of the electric field in x-direction
 * \param fieldY double array to store the values of the electric field in y-direction
 * \param potential double array in which the electric potential is stored
 */
__global__ void computeElectricField(double* fieldX, double* fieldY, double* potential) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = nxC*nyC;
    //central difference: E = (phi[q-1] - phi[q+1]) / (2*dq)
    double invTwoDx = 1.0/(2.0*dxC);
    double invTwoDy = 1.0/(2.0*dyC);
    if (idx < n) {
        int i = idx/nyC; //x index (row), stride nyC
        int j = idx%nyC; //y index (column)
        //x component: x-neighbours differ by nyC, wrapping periodically
        if (i==0){
            fieldX[idx] = (potential[idx+(nxC-1)*nyC]-potential[idx+nyC])*invTwoDx;
        }
        else if (i==(nxC-1)) {
            fieldX[idx] = (potential[idx-nyC]-potential[idx-(nxC-1)*nyC])*invTwoDx;
        }
        else {
            fieldX[idx] = (potential[idx-nyC]-potential[idx+nyC])*invTwoDx;
        }
        //y component: y-neighbours are adjacent elements, wrapping periodically
        if (j==0) {
            fieldY[idx] = (potential[idx+(nyC-1)]-potential[idx+1])*invTwoDy;
        }
        else if (j==(nyC-1)) {
            fieldY[idx] = (potential[idx-1]-potential[idx-(nyC-1)])*invTwoDy;
        }
        else {
            fieldY[idx] = (potential[idx-1]-potential[idx+1])*invTwoDy;
        }
    }
}
/**
 * \brief Kernel to compute the force acting on each particle.
 *
 * Gathers the electric field at each particle by bilinear (cloud-in-cell)
 * weighting over the four grid points of the cell the particle sits in,
 * wrapping periodically at the upper grid edges. Field arrays use the layout
 * field[i*nyC + j] (x index i, stride nyC), the same layout chargeAssignment
 * deposits into. The stored result is qmC * E, i.e. charge/mass times the
 * field — an acceleration — which positionUpdate integrates directly.
 *
 * Launch: one thread per particle, 1-D grid rounded up; the host passes
 * 4*blockDim.x doubles of dynamic shared memory.
 *
 * NOTE(review): the shared-memory staging below is dead code — sdata is
 * written and synchronised but never read; all operands are re-loaded from
 * global memory. Harmless but wasted bandwidth.
 *
 * NOTE(review): each corner is weighted by the sub-area adjacent to it rather
 * than the opposite one; chargeAssignment uses the same convention, so the
 * scheme is internally consistent — confirm against the intended CIC scheme.
 *
 * \param positionX double array containing x-coordinate of each particle
 * \param positionY double array containing y-coordinate of each particle
 * \param cellX int array containing current cell in x-direction of each particle
 * \param cellY int array containing current cell in y-direction of each particle
 * \param fieldX double array containing x-component of electric field at each grid point
 * \param fieldY double array containing y-component of electric field at each grid point
 * \param forceX double array to store x-component of (charge/mass-scaled) force on each particle
 * \param forceY double array to store y-component of (charge/mass-scaled) force on each particle
 */
__global__ void forceCalculation(double* positionX, double* positionY, int* cellX, int* cellY, double* fieldX, double* fieldY, double* forceX, double* forceY) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
double invArea = 1.0/cellAreaC; //reciprocal of cell area normalises the bilinear weights
int n = npxC*npyC; //total particle count
int y = nyC; //row stride of the field arrays
//staging into dynamic shared memory (unused — see NOTE above)
extern __shared__ double sdata[];
sdata[4*tid] = (idx < n) ? positionX[idx] : 0;
sdata[4*tid+1] = (idx < n) ? positionY[idx] : 0;
sdata[4*tid+2] = (idx < n) ? cellX[idx] : 0;
sdata[4*tid+3] = (idx < n) ? cellY[idx] : 0;
__syncthreads();
//computing force acting on each particle
if (idx < n) {
double tmp = 0.0;
int xCell = cellX[idx];
int yCell = cellY[idx];
//bilinear weights: fraction of the cell area in each quadrant around the particle
double area1 = ((positionX[idx] - xCell*dxC) * (positionY[idx] - yCell*dyC))*invArea;
double area2 = (((xCell*dxC + dxC) - positionX[idx]) * (positionY[idx] - yCell*dyC))*invArea;
double area3 = ((positionX[idx] - xCell*dxC) * ((yCell*dyC + dyC) - positionY[idx]))*invArea;
double area4 = (((xCell*dxC + dxC) - positionX[idx]) * ((yCell*dyC + dyC) - positionY[idx]))*invArea;
//x component: gather from the four cell corners, wrapping with % at the edges
forceX[idx] = 0.0;
tmp += area1 * fieldX[xCell*y + yCell];
tmp += area2 * fieldX[((xCell+1)%nxC)*y + yCell];
tmp += area3 * fieldX[xCell*y + ((yCell+1)%y)];
tmp += area4 * fieldX[((xCell+1)%nxC)*y + (yCell+1)%y];
forceX[idx] = tmp*qmC;
//y component: same gather on the y field
forceY[idx] = 0.0;
tmp = 0.0;
tmp += area1 * fieldY[xCell*y + yCell];
tmp += area2 * fieldY[((xCell+1)%nxC)*y + yCell];
tmp += area3 * fieldY[xCell*y + (yCell+1)%y];
tmp += area4 * fieldY[((xCell+1)%nxC)*y + (yCell+1)%y];
forceY[idx] = tmp*qmC;
}
}
/**
 * \brief Kernel to update the particle positions given the force acting on them.
 *
 * Velocity-Verlet (leapfrog) step: half-kick with the previous step's
 * acceleration, drift, periodic wrap, then a second half-kick with the
 * freshly gathered acceleration. forceX/forceY already carry the q/m factor
 * (see forceCalculation), so they are used directly as accelerations.
 *
 * Fix: the periodic wrap now uses '>=' — with the previous '>' a coordinate
 * exactly equal to xDimC/yDimC survived unwrapped and mapped to cell index
 * nxC/nyC, one past the end of every grid array. The wrap assumes a particle
 * moves less than one domain length per step.
 *
 * The unused shared-memory staging of the original was removed (it was
 * written and never read).
 *
 * \param positionX double array containing x-component of position
 * \param positionY double array containing y-component of position
 * \param velocityX double array containing x-component of velocity
 * \param velocityY double array containing y-component of velocity
 * \param forceX double array containing x-component of force (q/m-scaled)
 * \param forceY double array containing y-component of force (q/m-scaled)
 * \param forceOldX double array containing x-component of force at previous time-step
 * \param forceOldY double array containing y-component of force at previous time-step
 * \param cellX int array containing current cell in x-direction of particle
 * \param cellY int array containing current cell in y-direction of particle
 */
__global__ void positionUpdate(double* positionX, double* positionY, double* velocityX, double* velocityY, double* forceX, double* forceY, double* forceOldX, double* forceOldY, int* cellX, int* cellY) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = npC;
    if (idx < n) {
        //half-kick using the acceleration from the previous step
        double vxh = velocityX[idx] + 0.5*dtC*forceOldX[idx];
        double vyh = velocityY[idx] + 0.5*dtC*forceOldY[idx];
        //drift
        positionX[idx] += vxh*dtC;
        positionY[idx] += vyh*dtC;
        //periodic wrap; '>=' keeps positions strictly inside [0, dim)
        if (positionX[idx] >= xDimC) {positionX[idx] -= xDimC;}
        else if (positionX[idx] < 0) {positionX[idx] += xDimC;}
        if (positionY[idx] >= yDimC) {positionY[idx] -= yDimC;}
        else if (positionY[idx] < 0) {positionY[idx] += yDimC;}
        //second half-kick with the current acceleration
        velocityX[idx] = vxh + 0.5*dtC*forceX[idx];
        velocityY[idx] = vyh + 0.5*dtC*forceY[idx];
        //keep the cached cell indices in sync with the new position
        cellX[idx] = (int)(positionX[idx]/dxC);
        cellY[idx] = (int)(positionY[idx]/dyC);
    }
}
/**
 * \brief Kernel to initialise the per-thread cuRAND RNG states.
 *
 * Fixed seed, one independent sequence per thread, zero offset, so runs are
 * reproducible. Requires the npC constant to be uploaded first (picFloatGpu
 * does this before any kernel launch).
 *
 * Fix: added a bounds guard. The launch grid is rounded up to a whole number
 * of blocks while the state array holds exactly npC entries, so tail threads
 * previously wrote state[idx] out of bounds.
 *
 * \param state random number generator state array (npC entries)
 */
__global__ void initialiseGenerator(curandState *state) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    //guard the rounded-up grid tail: state[] has only npC entries
    if (idx < npC) {
        //same seed for all threads, distinct sequence per thread, no offset
        curand_init(1234, idx, 0, &state[idx]);
    }
}
/**
 * \brief Kernel to simulate Monte-Carlo collisions between electrons.
 *
 * Each particle collides with fixed probability p per timestep; on collision
 * its velocity components are redrawn from an approximate Maxwellian built as
 * a scaled sum of three uniform deviates (central-limit approximation).
 *
 * Fix: the advanced RNG state is now written back to global memory. The
 * original used a local copy of state[idx] but never stored it back, so every
 * invocation of this kernel replayed the identical random sequence.
 *
 * The unused shared-memory staging of the original was removed (written,
 * never read).
 *
 * \param state random number generator state array (one entry per particle)
 * \param velocityX array containing x-coordinate of particle velocity
 * \param velocityY array containing y-coordinate of particle velocity
 */
__global__ void collisions(curandState *state, double* velocityX, double* velocityY) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int n = npC;
    if (idx < n) {
        //fixed collision probability per particle per timestep
        double p = 0.22;
        //work on a register copy of the RNG state for speed
        curandState localState = state[idx];
        double r = curand_uniform_double(&localState);
        //if probability > random number, collision occurs
        if (p > r) {
            //redraw both velocity components from the approximate Maxwellian
            double max1 = 2*(curand_uniform_double(&localState) + curand_uniform_double(&localState) + curand_uniform_double(&localState) - 1.5);
            double max2 = 2*(curand_uniform_double(&localState) + curand_uniform_double(&localState) + curand_uniform_double(&localState) - 1.5);
            velocityX[idx] = max1;
            velocityY[idx] = max2;
        }
        //persist the advanced state so the next call continues the sequence
        state[idx] = localState;
    }
}
/**
* \brief Function to declare device variables and call relevant kernels on the gpu
*
* \param
* \param
*/
double picFloatGpu(double* positionX, double *positionY, double* velocityX, double* velocityY, double *xResult, double *yResult, std::shared_ptr<Params> p) {
cudaSetDevice(0);
//cudaSetDevice(1);
//creating cuda streams
cudaStream_t stream[2];
for (int i=0; i<2; i++) {
cudaStreamCreate(&stream[i]);
}
printf("starting on gpu\n");
//initialise required variables for time measurement
cudaEvent_t start, finish;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&finish);
cudaEventRecord(start, 0);
//declaring variables to be stored in constant memory
const int xDim = p->xDim;
const int yDim = p->yDim;
const int nx = p->nx;
const int ny = p->ny;
const int npx = p->numParticlesX;
const int npy = p->numParticlesY;
const int np = npx*npy;
const double charge = p->electronCharge;
const double mass = p->mass;
const double dx = p->dx;
const double dy = p->dy;
const double area = p->area;
const double qm = p->electronCharge/p->mass;
const double dt = p->dt;
//grid configuration for particle kernels
dim3 dimBlock(p->blockSize);
dim3 dimGridParticles((np/dimBlock.x)+(!(np%dimBlock.x)?0:1));
//grid configuration for grid kernels
int ng = nx*ny;
dim3 dimGridCells((ng/dimBlock.x)+(!(ng%dimBlock.x)?0:1));
//shared memory configuration
int smem1 = dimBlock.x*sizeof(double); //size of shared memory in each block (1 double per thread)
int smem2 = dimBlock.x*2*sizeof(double); //size of shared memory in each block (2 variables per thread)
int smem4 = dimBlock.x*4*sizeof(double); //size of shared memory in each block (4 variables per thread)
//copying data to constant memory
cudaMemcpyToSymbolAsync(xDimC, &xDim, sizeof(int), 0, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyToSymbolAsync(yDimC, &yDim, sizeof(int), 0, cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyToSymbolAsync(nxC, &nx, sizeof(int), 0, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyToSymbolAsync(nyC, &ny, sizeof(int), 0, cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyToSymbolAsync(npxC, &npx, sizeof(int), 0, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyToSymbolAsync(npyC, &npy, sizeof(int), 0, cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyToSymbolAsync(npC, &np, sizeof(int), 0, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyToSymbolAsync(chargeC, &charge, sizeof(double), 0, cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyToSymbolAsync(massC, &mass, sizeof(double), 0, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyToSymbolAsync(dxC, &dx, sizeof(double), 0, cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyToSymbolAsync(dyC, &dy, sizeof(double), 0, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyToSymbolAsync(cellAreaC, &area, sizeof(double), 0, cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyToSymbolAsync(qmC, &qm, sizeof(double), 0, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyToSymbolAsync(dtC, &dt, sizeof(double), 0, cudaMemcpyHostToDevice, stream[1]);
//pointers to device memory
double *positionXGpu, *positionYGpu;
int *currentCellXGpu, *currentCellYGpu;
double *velocityXGpu, *velocityYGpu;
double *forceXGpu, *forceYGpu;
double *forceXOldGpu, *forceYOldGpu;
double *chargeDensityGpu;
double *electricPotentialGpu;
double *electricFieldXGpu, *electricFieldYGpu;
//allocating device memory for data arrays
cudaMalloc((void**) &positionXGpu, np*sizeof(double)); //memory for particle positions
cudaMalloc((void**) &positionYGpu, np*sizeof(double)); //memory for particle positions
cudaMalloc((void**) &velocityXGpu, np*sizeof(double)); //memory for velocity of particles
cudaMalloc((void**) &velocityYGpu, np*sizeof(double)); //memory for velocity of particles
cudaMalloc((void**) &currentCellXGpu, np*sizeof(int)); //memory for current cell of each particle
cudaMalloc((void**) &currentCellYGpu, np*sizeof(int)); //memory for current cell of each particle
cudaMalloc((void**) &forceXGpu, np*sizeof(double)); //memory for force acting on particles
cudaMalloc((void**) &forceYGpu, np*sizeof(double)); //memory for force acting on particles
cudaMalloc((void**) &forceXOldGpu, np*sizeof(double)); //memory for force acting on particles at previous timestep
cudaMalloc((void**) &forceYOldGpu, np*sizeof(double)); //memory for force acting on particles at previous timestep
cudaMalloc((void**) &chargeDensityGpu, nx*ny*sizeof(double)); //memory for charge density at all grid points
cudaMalloc((void**) &electricPotentialGpu, nx*ny*sizeof(double)); //memory for electric potential at all grid points
cudaMalloc((void**) &electricFieldXGpu, nx*ny*sizeof(double)); //memory for electric field at all grid points
cudaMalloc((void**) &electricFieldYGpu, nx*ny*sizeof(double)); //memory for electric field at all grid points
//copying particle data from host
cudaMemcpyAsync(positionXGpu, positionX, np*sizeof(double), cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyAsync(positionYGpu, positionY, np*sizeof(double), cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyAsync(velocityXGpu, velocityX, np*sizeof(double), cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyAsync(velocityYGpu, velocityY, np*sizeof(double), cudaMemcpyHostToDevice, stream[1]);
//creating cuFFT plans
cufftHandle planForward, planInverse;
cufftCreate(&planForward);
cufftCreate(&planInverse);
cufftPlan2d(&planForward, nx, ny, CUFFT_D2Z);
cufftPlan2d(&planInverse, nx, ny, CUFFT_Z2D);
//allocating memory for fft
const int nyh = ny/2 + 1;
double *in;
cufftDoubleComplex *out;
cudaMalloc((void**) &out, nx*nyh*sizeof(cufftDoubleComplex));
//declaring variables for curand RNG
curandState *states;
cudaMalloc((void **)&states, np*sizeof(curandState));
//initialise curand generator
initialiseGenerator<<<dimGridParticles,dimBlock,smem2,stream[0]>>>(states);
//starting iteration
double t = 0;
printf("Starting iteration on GPU.\n");
while (t < p->tmax) {
//setting current cells of each particle
currentCell<<<dimGridParticles,dimBlock,smem2,stream[0]>>>(positionXGpu, positionYGpu, currentCellXGpu, currentCellYGpu);
//interpolating charge to gridpoints
chargeAssignment<<<dimGridParticles,dimBlock,smem4,stream[0]>>>(positionXGpu, positionYGpu, currentCellXGpu, currentCellYGpu, chargeDensityGpu);
//forward R2C transform
in = chargeDensityGpu;
cufftExecD2Z(planForward, in, out);
//computing poisson equation in Fourier space
fftPoissonSolver<<<dimGridCells,dimBlock,smem2,stream[0]>>>(out, nyh);
//inverse C2R transform and normalisation
cufftExecZ2D(planInverse, out, in);
normaliseTransform<<<dimGridCells, dimBlock, smem2, stream[0]>>>(in);
//copy FFT Poisson output to electric potential array
copyD2D<<<dimGridCells,dimBlock,0,stream[0]>>>(electricPotentialGpu, in);
//computing electric field from electric potential
computeElectricField<<<dimGridCells,dimBlock,smem1,stream[0]>>>(electricFieldXGpu, electricFieldYGpu, electricPotentialGpu);
//computing force on each particle
forceCalculation<<<dimGridParticles,dimBlock,smem4,stream[0]>>>(positionXGpu, positionYGpu, currentCellXGpu, currentCellYGpu, electricFieldXGpu, electricFieldYGpu, forceXGpu, forceYGpu);
//updating particle positions
positionUpdate<<<dimGridParticles,dimBlock,smem4,stream[0]>>>
(positionXGpu, positionYGpu, velocityXGpu, velocityYGpu, forceXGpu, forceYGpu, forceXOldGpu, forceYOldGpu, currentCellXGpu, currentCellYGpu);
//copy new force to forceOld arrays
copyD2D<<<dimGridCells,dimBlock,0,stream[0]>>>(forceXOldGpu, forceXGpu);
copyD2D<<<dimGridCells,dimBlock,0,stream[1]>>>(forceYOldGpu, forceYGpu);
//simulate collisions
collisions<<<dimGridParticles,dimBlock,smem2,stream[0]>>>(states,velocityXGpu, velocityYGpu);
t += dt;
}
//copying results back to the host
cudaMemcpyAsync(xResult, positionXGpu, np*sizeof(double), cudaMemcpyDeviceToHost, stream[0]);
cudaMemcpyAsync(yResult, positionYGpu, np*sizeof(double), cudaMemcpyDeviceToHost, stream[1]);
//stop timer
cudaEventRecord(finish, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(finish);
cudaEventElapsedTime(&elapsedTime, start, finish);
//destroying cuda streams
for (int i=0; i<2; i++) {
cudaStreamDestroy(stream[i]);
}
//freeing dynamically allocated memory
cudaFree(positionXGpu), cudaFree(positionYGpu);
cudaFree(velocityXGpu), cudaFree(velocityYGpu);
cudaFree(currentCellXGpu), cudaFree(currentCellYGpu);
cudaFree(forceXGpu), cudaFree(forceYGpu);
cudaFree(forceXOldGpu), cudaFree(forceYOldGpu);
cudaFree(chargeDensityGpu);
cudaFree(electricPotentialGpu);
cudaFree(electricFieldXGpu), cudaFree(electricFieldYGpu);
printf("finished on gpu\n");
//return elapsed time in seconds
return (elapsedTime/1000);
}
|
882ebf175deaea61599bbac339d79f9ffc196205.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "PNG.h"
// Box (mean) filter over a tightly packed RGBA8 image (4 bytes per pixel).
//
// Launch layout: one block per pixel with a single thread per block
// (grid = (imageWidth, imageHeight), block = (1,1,1)); blockIdx.x/.y are the
// pixel coordinates. Each pixel is replaced by the average of all in-bounds
// pixels inside the (2*halfBoxWidth+1) x (2*halfBoxHeight+1) window centred
// on it. Assumes halfBoxWidth/halfBoxHeight >= 0.
__global__ void boxFilter(const unsigned char* in, unsigned char* out, const int imageWidth, const int imageHeight, const int halfBoxWidth, const int halfBoxHeight)
{
    int x = blockIdx.x;
    int y = blockIdx.y;

    // Guard against grids larger than the image: without it an oversized
    // launch would access out of bounds and `count` could stay 0, causing a
    // division by zero below.
    if (x >= imageWidth || y >= imageHeight)
        return;

    int count = 0;
    int index = (x + y * imageWidth) * 4;
    unsigned int total[4] = { 0, 0, 0, 0 };

    // Accumulate the window, skipping taps that fall outside the image.
    for (int i = -halfBoxWidth; i <= halfBoxWidth; i++)
    {
        for (int j = -halfBoxHeight; j <= halfBoxHeight; j++)
        {
            int cx = x + i;
            int cy = y + j;
            if (cx >= 0 && cy >= 0 && cx < imageWidth && cy < imageHeight)
            {
                int adjIndex = (cx + cy * imageWidth) * 4;
                for (int c = 0; c < 4; c++)
                {
                    total[c] += static_cast<unsigned int>(in[adjIndex + c]);
                }
                count++;
            }
        }
    }

    // count >= 1 here: with non-negative half-widths the centre tap
    // (i == 0, j == 0) is always in bounds.
    out[index] = static_cast<unsigned char>(total[0] / count);
    out[index + 1] = static_cast<unsigned char>(total[1] / count);
    out[index + 2] = static_cast<unsigned char>(total[2] / count);
    out[index + 3] = static_cast<unsigned char>(total[3] / count);
}
// Entry point: loads "Lenna.png", runs a GPU box filter over it and writes
// the result to "cuda_tutorial_3.png". argv[1]/argv[2] optionally override
// the filter half-width/half-height (defaults 10x10).
int main(int arg, char* args[])
{
    int filterWidth = 10;
    int filterHeight = 10;
    // NOTE(review): requires BOTH arguments; a single argument is silently
    // ignored because of the "arg > 2" check.
    if (arg > 2)
    {
        filterWidth = std::atoi(args[1]);
        filterHeight = std::atoi(args[2]);
    }
    PNG inPng("Lenna.png");
    PNG outPng;
    outPng.Create(inPng.w, inPng.h);
    //store width and height so we can use them for our output image later
    const unsigned int w = inPng.w;
    const unsigned int h = inPng.h;
    //4 because there are 4 color channels R, G, B, and A
    int size = w * h * 4;
    unsigned char *in = 0;
    unsigned char *out = 0;
    hipError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess)
    {
        std::cout << "No CUDA devices found!" << std::endl;
        exit(1);
    }
    //prints the device the kernel will be running on
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);
    std::cout << "Using device: " << prop.name << std::endl;
    // Allocate GPU buffers for the images
    // NOTE(review): these allocation/copy calls do not check their return
    // codes; a failure here surfaces only later as a kernel error.
    hipMalloc((void**)&in, size * sizeof(unsigned char));
    hipMalloc((void**)&out, size * sizeof(unsigned char));
    // Copy image data from host memory to GPU buffers.
    hipMemcpy(in, &inPng.data[0], size * sizeof(unsigned char), hipMemcpyHostToDevice);
    //free the input image because we do not need it anymore
    inPng.Free();
    // Launch a kernel on the GPU with one thread for each element.
    // NOTE(review): despite its name, block_size is used as the GRID
    // (one block per pixel, 1 thread per block), which severely
    // underutilizes the GPU; grid_size is unused in the launch below.
    dim3 block_size(w, h);
    dim3 grid_size(1);
    hipLaunchKernelGGL(( boxFilter), dim3(block_size), dim3(1), 0, 0, in, out, w, h, filterWidth, filterHeight);
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess)
    {
        std::cout << "Kernel launch failed: " << hipGetErrorString(cudaStatus) << std::endl;
        hipFree(in);
        hipFree(out);
        exit(1);
    }
    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess)
    {
        std::cout << "Could not synchronize device!" << std::endl;
        hipFree(in);
        hipFree(out);
        exit(1);
    }
    //temporary array to store the result from opencl
    auto tmp = new unsigned char[w * h * 4];
    // Copy output vector from GPU buffer to host memory.
    // NOTE(review): the copy status is only checked AFTER the image has
    // already been saved below.
    cudaStatus = hipMemcpy(tmp, out, size * sizeof(unsigned char), hipMemcpyDeviceToHost);
    hipFree(in);
    hipFree(out);
    //copy the data from the temp array to the png
    std::copy(&tmp[0], &tmp[w * h * 4], std::back_inserter(outPng.data));
    //write the image to file
    outPng.Save("cuda_tutorial_3.png");
    //free the iamge's resources since we are done with it
    outPng.Free();
    //free the temp array
    delete[] tmp;
    if (cudaStatus != hipSuccess)
    {
        std::cout << "Could not copy buffer memory to host!" << std::endl;
        exit(1);
    }
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess)
    {
        std::cout << "Device reset failed!" << std::endl;
        exit(1);
    }
    return 0;
} | 882ebf175deaea61599bbac339d79f9ffc196205.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "PNG.h"
// Box (mean) filter over a tightly packed RGBA8 image (4 bytes per pixel).
//
// Launch layout: one block per pixel with a single thread per block
// (grid = (imageWidth, imageHeight), block = (1,1,1)); blockIdx.x/.y are the
// pixel coordinates. Each pixel is replaced by the average of all in-bounds
// pixels inside the (2*halfBoxWidth+1) x (2*halfBoxHeight+1) window centred
// on it. Assumes halfBoxWidth/halfBoxHeight >= 0.
__global__ void boxFilter(const unsigned char* in, unsigned char* out, const int imageWidth, const int imageHeight, const int halfBoxWidth, const int halfBoxHeight)
{
    int x = blockIdx.x;
    int y = blockIdx.y;

    // Guard against grids larger than the image: without it an oversized
    // launch would access out of bounds and `count` could stay 0, causing a
    // division by zero below.
    if (x >= imageWidth || y >= imageHeight)
        return;

    int count = 0;
    int index = (x + y * imageWidth) * 4;
    unsigned int total[4] = { 0, 0, 0, 0 };

    // Accumulate the window, skipping taps that fall outside the image.
    for (int i = -halfBoxWidth; i <= halfBoxWidth; i++)
    {
        for (int j = -halfBoxHeight; j <= halfBoxHeight; j++)
        {
            int cx = x + i;
            int cy = y + j;
            if (cx >= 0 && cy >= 0 && cx < imageWidth && cy < imageHeight)
            {
                int adjIndex = (cx + cy * imageWidth) * 4;
                for (int c = 0; c < 4; c++)
                {
                    total[c] += static_cast<unsigned int>(in[adjIndex + c]);
                }
                count++;
            }
        }
    }

    // count >= 1 here: with non-negative half-widths the centre tap
    // (i == 0, j == 0) is always in bounds.
    out[index] = static_cast<unsigned char>(total[0] / count);
    out[index + 1] = static_cast<unsigned char>(total[1] / count);
    out[index + 2] = static_cast<unsigned char>(total[2] / count);
    out[index + 3] = static_cast<unsigned char>(total[3] / count);
}
// Entry point: loads "Lenna.png", runs a GPU box filter over it and writes
// the result to "cuda_tutorial_3.png". argv[1]/argv[2] optionally override
// the filter half-width/half-height (defaults 10x10).
int main(int arg, char* args[])
{
    int filterWidth = 10;
    int filterHeight = 10;
    // NOTE(review): requires BOTH arguments; a single argument is silently
    // ignored because of the "arg > 2" check.
    if (arg > 2)
    {
        filterWidth = std::atoi(args[1]);
        filterHeight = std::atoi(args[2]);
    }
    PNG inPng("Lenna.png");
    PNG outPng;
    outPng.Create(inPng.w, inPng.h);
    //store width and height so we can use them for our output image later
    const unsigned int w = inPng.w;
    const unsigned int h = inPng.h;
    //4 because there are 4 color channels R, G, B, and A
    int size = w * h * 4;
    unsigned char *in = 0;
    unsigned char *out = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "No CUDA devices found!" << std::endl;
        exit(1);
    }
    //prints the device the kernel will be running on
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::cout << "Using device: " << prop.name << std::endl;
    // Allocate GPU buffers for the images
    // NOTE(review): these allocation/copy calls do not check their return
    // codes; a failure here surfaces only later as a kernel error.
    cudaMalloc((void**)&in, size * sizeof(unsigned char));
    cudaMalloc((void**)&out, size * sizeof(unsigned char));
    // Copy image data from host memory to GPU buffers.
    cudaMemcpy(in, &inPng.data[0], size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    //free the input image because we do not need it anymore
    inPng.Free();
    // Launch a kernel on the GPU with one thread for each element.
    // NOTE(review): despite its name, block_size is used as the GRID
    // (one block per pixel, 1 thread per block), which severely
    // underutilizes the GPU; grid_size is unused in the launch below.
    dim3 block_size(w, h);
    dim3 grid_size(1);
    boxFilter<<<block_size, 1>>>(in, out, w, h, filterWidth, filterHeight);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "Kernel launch failed: " << cudaGetErrorString(cudaStatus) << std::endl;
        cudaFree(in);
        cudaFree(out);
        exit(1);
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "Could not synchronize device!" << std::endl;
        cudaFree(in);
        cudaFree(out);
        exit(1);
    }
    //temporary array to store the result from opencl
    auto tmp = new unsigned char[w * h * 4];
    // Copy output vector from GPU buffer to host memory.
    // NOTE(review): the copy status is only checked AFTER the image has
    // already been saved below.
    cudaStatus = cudaMemcpy(tmp, out, size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaFree(in);
    cudaFree(out);
    //copy the data from the temp array to the png
    std::copy(&tmp[0], &tmp[w * h * 4], std::back_inserter(outPng.data));
    //write the image to file
    outPng.Save("cuda_tutorial_3.png");
    //free the iamge's resources since we are done with it
    outPng.Free();
    //free the temp array
    delete[] tmp;
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "Could not copy buffer memory to host!" << std::endl;
        exit(1);
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "Device reset failed!" << std::endl;
        exit(1);
    }
    return 0;
} |
56adfc0c16858a14563fde6276ddcd70e48572c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <torch/library.h>
#include <ATen/hip/Atomic.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
// Samples `input` (a height x width feature map) at the continuous
// coordinate (y, x) with bilinear interpolation. Coordinates up to one
// pixel outside the map are clamped onto the border; anything farther out
// contributes zero. `index` is unused except for debugging.
template <typename T>
__device__ T bilinear_interpolate(
    const T* input,
    int height,
    int width,
    T y,
    T x,
    int index /* index for debug only*/) {
  // Completely outside the (slightly padded) feature map -> no contribution.
  const bool out_of_range = y < -1.0 || y > height || x < -1.0 || x > width;
  if (out_of_range)
    return 0;

  // Clamp negative coordinates onto the top/left border.
  y = (y <= 0) ? static_cast<T>(0) : y;
  x = (x <= 0) ? static_cast<T>(0) : x;

  // Integer corners of the interpolation cell, clamped to the last valid
  // row/column (degenerating to a single texel on the border).
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high = y_low + 1;
  int x_high = x_low + 1;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  }

  // Fractional offsets inside the cell and their complements.
  const T ly = y - y_low;
  const T lx = x - x_low;
  const T hy = 1. - ly;
  const T hx = 1. - lx;

  // Fetch the four neighbouring texels and blend them.
  const T v1 = input[y_low * width + x_low];    // top-left
  const T v2 = input[y_low * width + x_high];   // top-right
  const T v3 = input[y_high * width + x_low];   // bottom-left
  const T v4 = input[y_high * width + x_high];  // bottom-right
  return hy * hx * v1 + hy * lx * v2 + ly * hx * v3 + ly * lx * v4;
}
// Position-sensitive RoI-Align forward pass.
// Each loop index maps to one pooled output element (n, c_out, ph, pw);
// indices are walked by the CUDA_1D_KERNEL_LOOP helper from cuda_helpers.h.
// Each row of `rois` is [batch_idx, x1, y1, x2, y2] in unscaled input
// coordinates. A bin's value is the average of bilinearly-interpolated
// samples taken from the single input channel that is position-mapped to
// this bin; that channel index is stored in channel_mapping so the backward
// pass can find it again.
template <typename T>
__global__ void ps_roi_align_forward_kernel_impl(
    int nthreads,
    const T* input,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    int sampling_ratio,
    const T* rois,
    int channels_out,
    T* output,
    int* channel_mapping) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c_out, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c_out = (index / pooled_width / pooled_height) % channels_out;
    int n = index / pooled_width / pooled_height / channels_out;
    // (n, c_in, ph, pw) is the associated element in the input
    int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
    // [start, end) interval for spatial sampling
    const T* offset_rois = rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    // Do not using rounding; this implementation detail is critical
    // (the 0.5 shift gives half-pixel-aligned continuous coordinates)
    T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
    T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
    T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
    T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Do not using floor/ceil; this implementation detail is critical
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    // We use roi_bin_grid to sample the grid and mimic integral
    // (adaptive grid size when sampling_ratio <= 0)
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height);
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = roi_bin_grid_h * roi_bin_grid_w;
    const T* offset_input =
        input + (roi_batch_ind * channels + c_in) * height * width;
    T out_sum = 0;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = hstart +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = wstart +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T val = bilinear_interpolate(offset_input, height, width, y, x, index);
        out_sum += val;
      }
    }
    // Average over the samples in this bin.
    out_sum /= count;
    output[index] = out_sum;
    channel_mapping[index] = c_in;
  }
}
// Computes the four bilinear weights (w1..w4) and integer corner
// coordinates (x_low/x_high, y_low/y_high) for continuous point (y, x),
// mirroring the clamping done by bilinear_interpolate in the forward pass.
// Points outside the (slightly padded) feature map get zero weights and
// corner indices of -1 so the caller can skip the gradient scatter.
template <typename T>
__device__ void bilinear_interpolate_gradient(
    int height,
    int width,
    T y,
    T x,
    T& w1,
    T& w2,
    T& w3,
    T& w4,
    int& x_low,
    int& x_high,
    int& y_low,
    int& y_high,
    int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  if (y <= 0)
    y = 0;
  if (x <= 0)
    x = 0;
  y_low = (int)y;
  x_low = (int)x;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets inside the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // reference in forward
  // T v1 = input[y_low * width + x_low];
  // T v2 = input[y_low * width + x_high];
  // T v3 = input[y_high * width + x_low];
  // T v4 = input[y_high * width + x_high];
  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
}
// Position-sensitive RoI-Align backward pass.
// One loop index per grad_output element; CUDA_1D_KERNEL_LOOP (from
// cuda_helpers.h) walks the indices. The kernel re-derives the forward
// sampling grid (channel_mapping supplies the input channel each bin was
// pooled from) and scatters each sample's gradient to its four bilinear
// corners with gpuAtomicAdd — accumulation order is nondeterministic.
template <typename T>
__global__ void ps_roi_align_backward_kernel_impl(
    int nthreads,
    const T* grad_output,
    const int* channel_mapping,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    int sampling_ratio,
    int channels_out,
    T* grad_input,
    const T* rois) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, *, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int n = index / pooled_width / pooled_height / channels_out;
    const T* offset_rois = rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    // Do not using rounding; this implementation detail is critical
    T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
    T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
    T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
    T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
    // Force too small ROIs to be 1x1
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    T bin_size_h = roi_height / static_cast<T>(pooled_height);
    T bin_size_w = roi_width / static_cast<T>(pooled_width);
    int c_in = channel_mapping[index];
    T* grad_input_offset =
        grad_input + (roi_batch_ind * channels + c_in) * height * width;
    // Do not using floor/ceil; this implementation detail is critical
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    const T grad_output_this_bin = grad_output[index];
    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    // Total number of samples in this bin; each gets an equal share.
    const T count = roi_bin_grid_h * roi_bin_grid_w;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = hstart +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = wstart +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient(
            height,
            width,
            y,
            x,
            w1,
            w2,
            w3,
            w4,
            x_low,
            x_high,
            y_low,
            y_high,
            index);
        T g1 = grad_output_this_bin * w1 / count;
        T g2 = grad_output_this_bin * w2 / count;
        T g3 = grad_output_this_bin * w3 / count;
        T g4 = grad_output_this_bin * w4 / count;
        // Corners are -1 when the sample fell outside the feature map.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          gpuAtomicAdd(grad_input_offset + y_low * width + x_low, g1);
          gpuAtomicAdd(grad_input_offset + y_low * width + x_high, g2);
          gpuAtomicAdd(grad_input_offset + y_high * width + x_low, g3);
          gpuAtomicAdd(grad_input_offset + y_high * width + x_high, g4);
        } // if
      } // ix
    } // iy
  }
}
// Host-side launcher for the position-sensitive RoI-Align forward kernel.
// Returns (output, channel_mapping): output has shape
// [num_rois, channels / (pooled_height*pooled_width), pooled_height,
// pooled_width]; channel_mapping records, per output element, which input
// channel it was pooled from (consumed by the backward pass).
std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t sampling_ratio) {
  // Check if input tensors are CUDA tensors
  TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(
      rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ps_roi_align_forward_kernel";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  TORCH_CHECK(
      channels % (pooled_height * pooled_width) == 0,
      "input channels must be a multiple of pooling height * pooling width");
  int channels_out = channels / (pooled_height * pooled_width);
  auto output = at::zeros(
      {num_rois, channels_out, pooled_height, pooled_width}, input.options());
  auto channel_mapping =
      at::zeros(output.sizes(), input.options().dtype(at::kInt));
  auto output_size = output.numel();
  // Nothing to launch for zero RoIs; still surface any pending error.
  if (output_size == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(output, channel_mapping);
  }
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // 512 threads per block, grid capped at 4096 blocks; the kernel's 1D loop
  // strides over any remaining elements.
  dim3 grid(::min(
      ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  auto input_ = input.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.scalar_type(), "ps_roi_align_forward_kernel", [&] {
        hipLaunchKernelGGL(( ps_roi_align_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
            output_size,
            input_.data_ptr<scalar_t>(),
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            sampling_ratio,
            rois_.data_ptr<scalar_t>(),
            channels_out,
            output.data_ptr<scalar_t>(),
            channel_mapping.data_ptr<int>());
      });
  AT_CUDA_CHECK(hipGetLastError());
  // No device-wide synchronization here: the op is stream-ordered, so work
  // queued on the same stream observes the results in order. The previous
  // hipDeviceSynchronize() stalled the entire device with no correctness
  // benefit (launch errors are already caught by hipGetLastError above).
  return std::make_tuple(output, channel_mapping);
}
// Host-side launcher for the position-sensitive RoI-Align backward pass:
// scatters `grad` (gradients w.r.t. the pooled output) back into a
// [batch_size, channels, height, width] input-gradient tensor.
// All tensor arguments must live on the same GPU.
at::Tensor ps_roi_align_backward_kernel(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& channel_mapping,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t sampling_ratio,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  // Validate device placement and dtypes before touching any data.
  TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(
      channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor");
  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
      channel_mapping_t{channel_mapping, "channel_mapping", 3};
  at::CheckedFrom checked_from = "ps_roi_align_backward_kernel";
  at::checkAllSameGPU(checked_from, {grad_t, rois_t, channel_mapping_t});
  at::checkAllSameType(checked_from, {grad_t, rois_t});
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
  // The gradient buffer starts zeroed; the kernel accumulates into it
  // atomically.
  auto grad_input =
      at::zeros({batch_size, channels, height, width}, grad.options());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // One thread per grad element, capped at 4096 blocks of 512 threads; the
  // kernel's 1D loop strides over anything beyond that.
  constexpr int64_t kThreadsPerBlock = 512;
  dim3 num_blocks(::min(
      ceil_div(grad.numel(), kThreadsPerBlock),
      static_cast<int64_t>(4096)));
  dim3 threads(kThreadsPerBlock);
  // handle possibly empty gradients
  if (grad.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return grad_input;
  }
  int channels_out = channels / (pooled_height * pooled_width);
  // Atomic scatter-adds make the accumulation order nondeterministic.
  at::globalContext().alertNotDeterministic("ps_roi_align_backward_kernel");
  auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      grad.scalar_type(), "ps_roi_align_backward_kernel", [&] {
        hipLaunchKernelGGL(( ps_roi_align_backward_kernel_impl<scalar_t>), dim3(num_blocks), dim3(threads), 0, stream,
            grad.numel(),
            grad_.data_ptr<scalar_t>(),
            channel_mapping.data_ptr<int>(),
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            sampling_ratio,
            channels_out,
            grad_input.data_ptr<scalar_t>(),
            rois_.data_ptr<scalar_t>());
      });
  AT_CUDA_CHECK(hipGetLastError());
  return grad_input;
}
} // namespace
// Registers the GPU implementations of the ps_roi_align forward/backward
// operators with the torchvision dispatcher under the CUDA dispatch key.
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"),
      TORCH_FN(ps_roi_align_forward_kernel));
  m.impl(
      TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"),
      TORCH_FN(ps_roi_align_backward_kernel));
}
} // namespace ops
} // namespace vision
| 56adfc0c16858a14563fde6276ddcd70e48572c0.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/library.h>
#include <ATen/cuda/Atomic.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
// Samples `input` (a height x width feature map) at the continuous
// coordinate (y, x) with bilinear interpolation. Coordinates up to one
// pixel outside the map are clamped onto the border; anything farther out
// contributes zero. `index` is unused except for debugging.
template <typename T>
__device__ T bilinear_interpolate(
    const T* input,
    int height,
    int width,
    T y,
    T x,
    int index /* index for debug only*/) {
  // Completely outside the (slightly padded) feature map -> no contribution.
  const bool out_of_range = y < -1.0 || y > height || x < -1.0 || x > width;
  if (out_of_range)
    return 0;

  // Clamp negative coordinates onto the top/left border.
  y = (y <= 0) ? static_cast<T>(0) : y;
  x = (x <= 0) ? static_cast<T>(0) : x;

  // Integer corners of the interpolation cell, clamped to the last valid
  // row/column (degenerating to a single texel on the border).
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high = y_low + 1;
  int x_high = x_low + 1;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  }

  // Fractional offsets inside the cell and their complements.
  const T ly = y - y_low;
  const T lx = x - x_low;
  const T hy = 1. - ly;
  const T hx = 1. - lx;

  // Fetch the four neighbouring texels and blend them.
  const T v1 = input[y_low * width + x_low];    // top-left
  const T v2 = input[y_low * width + x_high];   // top-right
  const T v3 = input[y_high * width + x_low];   // bottom-left
  const T v4 = input[y_high * width + x_high];  // bottom-right
  return hy * hx * v1 + hy * lx * v2 + ly * hx * v3 + ly * lx * v4;
}
// Position-sensitive RoI-Align forward pass.
// Each loop index maps to one pooled output element (n, c_out, ph, pw);
// indices are walked by the CUDA_1D_KERNEL_LOOP helper from cuda_helpers.h.
// Each row of `rois` is [batch_idx, x1, y1, x2, y2] in unscaled input
// coordinates. A bin's value is the average of bilinearly-interpolated
// samples taken from the single input channel that is position-mapped to
// this bin; that channel index is stored in channel_mapping so the backward
// pass can find it again.
template <typename T>
__global__ void ps_roi_align_forward_kernel_impl(
    int nthreads,
    const T* input,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    int sampling_ratio,
    const T* rois,
    int channels_out,
    T* output,
    int* channel_mapping) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c_out, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c_out = (index / pooled_width / pooled_height) % channels_out;
    int n = index / pooled_width / pooled_height / channels_out;
    // (n, c_in, ph, pw) is the associated element in the input
    int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
    // [start, end) interval for spatial sampling
    const T* offset_rois = rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    // Do not using rounding; this implementation detail is critical
    // (the 0.5 shift gives half-pixel-aligned continuous coordinates)
    T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
    T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
    T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
    T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Do not using floor/ceil; this implementation detail is critical
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    // We use roi_bin_grid to sample the grid and mimic integral
    // (adaptive grid size when sampling_ratio <= 0)
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height);
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = roi_bin_grid_h * roi_bin_grid_w;
    const T* offset_input =
        input + (roi_batch_ind * channels + c_in) * height * width;
    T out_sum = 0;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = hstart +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = wstart +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T val = bilinear_interpolate(offset_input, height, width, y, x, index);
        out_sum += val;
      }
    }
    // Average over the samples in this bin.
    out_sum /= count;
    output[index] = out_sum;
    channel_mapping[index] = c_in;
  }
}
// Computes the four bilinear weights (w1..w4) and integer corner
// coordinates (x_low/x_high, y_low/y_high) for continuous point (y, x),
// mirroring the clamping done by bilinear_interpolate in the forward pass.
// Points outside the (slightly padded) feature map get zero weights and
// corner indices of -1 so the caller can skip the gradient scatter.
template <typename T>
__device__ void bilinear_interpolate_gradient(
    int height,
    int width,
    T y,
    T x,
    T& w1,
    T& w2,
    T& w3,
    T& w4,
    int& x_low,
    int& x_high,
    int& y_low,
    int& y_high,
    int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  if (y <= 0)
    y = 0;
  if (x <= 0)
    x = 0;
  y_low = (int)y;
  x_low = (int)x;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets inside the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // reference in forward
  // T v1 = input[y_low * width + x_low];
  // T v2 = input[y_low * width + x_high];
  // T v3 = input[y_high * width + x_low];
  // T v4 = input[y_high * width + x_high];
  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
}
// Position-sensitive RoI-Align backward pass.
// One loop index per grad_output element; CUDA_1D_KERNEL_LOOP (from
// cuda_helpers.h) walks the indices. The kernel re-derives the forward
// sampling grid (channel_mapping supplies the input channel each bin was
// pooled from) and scatters each sample's gradient to its four bilinear
// corners with gpuAtomicAdd — accumulation order is nondeterministic.
template <typename T>
__global__ void ps_roi_align_backward_kernel_impl(
    int nthreads,
    const T* grad_output,
    const int* channel_mapping,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    int sampling_ratio,
    int channels_out,
    T* grad_input,
    const T* rois) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, *, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int n = index / pooled_width / pooled_height / channels_out;
    const T* offset_rois = rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    // Do not using rounding; this implementation detail is critical
    T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
    T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
    T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
    T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
    // Force too small ROIs to be 1x1
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    T bin_size_h = roi_height / static_cast<T>(pooled_height);
    T bin_size_w = roi_width / static_cast<T>(pooled_width);
    int c_in = channel_mapping[index];
    T* grad_input_offset =
        grad_input + (roi_batch_ind * channels + c_in) * height * width;
    // Do not using floor/ceil; this implementation detail is critical
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    const T grad_output_this_bin = grad_output[index];
    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    // Total number of samples in this bin; each gets an equal share.
    const T count = roi_bin_grid_h * roi_bin_grid_w;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = hstart +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = wstart +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient(
            height,
            width,
            y,
            x,
            w1,
            w2,
            w3,
            w4,
            x_low,
            x_high,
            y_low,
            y_high,
            index);
        T g1 = grad_output_this_bin * w1 / count;
        T g2 = grad_output_this_bin * w2 / count;
        T g3 = grad_output_this_bin * w3 / count;
        T g4 = grad_output_this_bin * w4 / count;
        // Corners are -1 when the sample fell outside the feature map.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          gpuAtomicAdd(grad_input_offset + y_low * width + x_low, g1);
          gpuAtomicAdd(grad_input_offset + y_low * width + x_high, g2);
          gpuAtomicAdd(grad_input_offset + y_high * width + x_low, g3);
          gpuAtomicAdd(grad_input_offset + y_high * width + x_high, g4);
        } // if
      } // ix
    } // iy
  }
}
// Host entry point for position-sensitive RoI Align forward (CUDA).
// Validates inputs, allocates the pooled output and the channel-mapping
// tensor (consumed by the backward pass), and launches the kernel on the
// current CUDA stream.
// Fix: removed the trailing cudaDeviceSynchronize(). The launch is
// stream-ordered and errors are already surfaced via cudaGetLastError();
// a blocking device-wide sync after every forward call defeats async
// execution and is absent from the backward path.
std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t sampling_ratio) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ps_roi_align_forward_kernel";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// PS-RoI pooling folds (pooled_h * pooled_w) input channels into one
// output channel, so the channel count must divide evenly.
TORCH_CHECK(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
if (output_size == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// One thread per output element, capped at 4096 blocks of 512 threads.
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "ps_roi_align_forward_kernel", [&] {
ps_roi_align_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois_.data_ptr<scalar_t>(),
channels_out,
output.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
// Host entry point for position-sensitive RoI Align backward (CUDA).
// Allocates a zeroed grad_input of the original input shape and launches the
// scatter kernel on the current stream. Non-deterministic because gradients
// are accumulated with atomics (alertNotDeterministic below).
at::Tensor ps_roi_align_backward_kernel(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t sampling_ratio,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "ps_roi_align_backward_kernel";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// One thread per grad element, capped at 4096 blocks of 512 threads.
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
at::globalContext().alertNotDeterministic("ps_roi_align_backward_kernel");
auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "ps_roi_align_backward_kernel", [&] {
ps_roi_align_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad_.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace
// Register the CUDA implementations of the ps_roi_align forward and backward
// operators with the torchvision dispatcher.
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"),
TORCH_FN(ps_roi_align_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"),
TORCH_FN(ps_roi_align_backward_kernel));
}
} // namespace ops
} // namespace vision
|
21f587a1e44dbc685fd1a238a523a0527e2f303c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: 2018-2019 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2019, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
// SGD-with-momentum update, element-wise over N parameters:
// the history buffer h keeps the new momentum term, and the same value is
// written back into the gradient buffer g as the effective step.
template <typename Dtype>
__global__ void SGDUpdate(int N, Dtype* g, Dtype* h,
Dtype momentum, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
const Dtype step = momentum * h[i] + local_rate * g[i];
h[i] = step;
g[i] = step;
}
}
// Host-side launcher for the SGDUpdate kernel using Caffe's standard 1-D
// launch configuration, followed by a launch/runtime error check.
// Fix: the automatic HIP conversion had spliced "hipLaunchKernelGGL((" into
// the middle of the NOLINT comment, leaving the kernel launch syntactically
// invalid (no macro invocation, unbalanced parentheses). Restored a proper
// hipLaunchKernelGGL call.
template <typename Dtype>
void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
Dtype local_rate) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(SGDUpdate<Dtype>, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, g, h, momentum, local_rate);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiations for the float and double solvers.
template void sgd_update_gpu<float>(int, float*, float*, float, float);
template void sgd_update_gpu<double>(int, double*, double*, double, double);
} // namespace caffe
| 21f587a1e44dbc685fd1a238a523a0527e2f303c.cu | /*
All modification made by Cambricon Corporation: © 2018-2019 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2019, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Element-wise SGD-with-momentum update over N parameters:
//   h[i] <- momentum * h[i] + local_rate * g[i];  g[i] <- h[i]
template <typename Dtype>
__global__ void SGDUpdate(int N, Dtype* g, Dtype* h,
Dtype momentum, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
const Dtype update = momentum * h[i] + local_rate * g[i];
h[i] = update;
g[i] = update;
}
}
// Host-side launcher: runs SGDUpdate over all N parameters with Caffe's
// standard 1-D launch configuration, then checks for launch/runtime errors.
// (The NOLINT comment must stay on the line before the <<<...>>> launch.)
template <typename Dtype>
void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
Dtype local_rate) {
SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, h, momentum, local_rate);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiations for the float and double solvers.
template void sgd_update_gpu<float>(int, float*, float*, float, float);
template void sgd_update_gpu<double>(int, double*, double*, double, double);
} // namespace caffe
|
ff6603763c03d946f1f555372d53301661721b50.hip | // !!! This is a file automatically generated by hipify!!!
#include "layer.h"
#include <random>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <cassert>
#include <math.h>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <iostream>
#include <cstring>
using namespace cudl;
using namespace std;
/****************************************************************
* Layer definition *
****************************************************************/
// Default constructor: members keep their in-class initial values; blobs are
// allocated lazily in fwd_initialize/bwd_initialize.
Layer::Layer()
{
/* do nothing */
}
// Destructor: releases every Blob this layer owns; pointers are nulled after
// deletion so repeated destruction attempts are harmless.
Layer::~Layer()
{
#if (DEBUG_FORWARD > 0 || DEBUG_BACKWARD > 0)
std::cout << "Destroy Layer: " << name_ << std::endl;
#endif
if (output_ != nullptr) { delete output_; output_ = nullptr; }
if (grad_input_ != nullptr) { delete grad_input_; grad_input_ = nullptr; }
if (weights_ != nullptr) { delete weights_; weights_ = nullptr; }
if (biases_ != nullptr) { delete biases_; biases_ = nullptr; }
if (grad_weights_ != nullptr) { delete grad_weights_; grad_weights_ = nullptr; }
if (grad_biases_ != nullptr) { delete grad_biases_; grad_biases_ = nullptr; }
}
// Initialises weights with He-uniform random values and biases with zero,
// then uploads both to the device. seed == 0 selects a non-deterministic
// seed from std::random_device; any other value gives reproducible runs.
// No-op when this layer has no trainable parameters.
void Layer::init_weight_bias(unsigned int seed)
{
checkCudaErrors(hipDeviceSynchronize());
if (weights_ == nullptr || biases_ == nullptr)
return;
// Create random network
std::random_device rd;
std::mt19937 gen(seed == 0 ? rd() : static_cast<unsigned int>(seed));
// He uniform distribution
float range = sqrt(6.f / input_->size()); // He's initialization
std::uniform_real_distribution<> dis(-range, range);
for (int i = 0; i < weights_->len(); i++)
weights_->ptr()[i] = static_cast<float>(dis(gen));
for (int i = 0; i < biases_->len(); i++)
biases_->ptr()[i] = 0.f;
// copy initialized value to the device
weights_->to(DeviceType::cuda);
biases_->to(DeviceType::cuda);
std::cout << ".. initialized " << name_ << " layer .." << std::endl;
}
// Vanilla SGD step: p <- p - learning_rate * grad_p for weights and biases,
// performed on the device via axpy. Layers without parameters (or before the
// first backward pass) are skipped.
void Layer::update_weights_biases(float learning_rate)
{
// axpy adds eps * grad, so negate the learning rate for descent.
float eps = -1.f * learning_rate;
if (weights_ != nullptr && grad_weights_ != nullptr)
{
#if (DEBUG_UPDATE)
weights_->print(name_ + "::weights (before update)", true);
grad_weights_->print(name_ + "::gweights", true);
#endif // DEBUG_UPDATE
// w = w + eps * dw
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(),
weights_->len(),
&eps,
grad_weights_->cuda(), 1,
weights_->cuda(), 1));
#if (DEBUG_UPDATE)
weights_->print(name_ + "weights (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
if (biases_ != nullptr && grad_biases_ != nullptr)
{
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (before update)", true);
grad_biases_->print(name_ + "gbiases", true);
#endif // DEBUG_UPDATE
// b = b + eps * db
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(),
biases_->len(),
&eps,
grad_biases_->cuda(), 1,
biases_->cuda(), 1));
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
// Computes the loss gradient (predict - target) / batch_size into
// grad_input_ and returns it. Assumes forward() already filled output_.
Blob<float> *Layer::get_gradient(Blob<float> *target)
{
// set grad_input_ as predict
checkCudaErrors(hipMemcpyAsync(grad_input_->cuda(),
output_->cuda(), output_->buf_size(),
hipMemcpyDeviceToDevice));
// set grad_input_ = predict - target
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(), target->len(),
&cuda_->minus_one, target->cuda(), 1,
grad_input_->cuda(), 1));
// normalize the grad_output by the batch size
int grad_output_size = target->n() * target->c() * target->h() * target->w();
float scale = 1.f / static_cast<float>(target->n());
checkCublasErrors(hipblasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
input_->print( name_ + "::input", true);
output_->print(name_ + "::predict", true);
target->print( name_ + "::y", true, target->n());
grad_input_->print(name_ + "::dx", true, target->n());
#endif
return grad_input_;
}
// Mean binary cross-entropy between the layer output (predicted
// probabilities) and the target map, averaged over every pixel of every
// sample in the batch.
// Fixes over the previous version:
//  - predictions are clamped into (eps, 1 - eps) instead of only remapping
//    values exactly equal to 0.0f / 1.0f, so near-zero/near-one values can
//    no longer produce log(0) = -inf;
//  - the normaliser is derived from the actual map size (width * height)
//    instead of the hard-coded 12544 (= 112 * 112), so the loss stays
//    correct for any output resolution.
// NOTE(review): the i*width+j index arithmetic assumes a square,
// single-channel map (width == height, output_size == width * height) --
// confirm for other topologies.
float Layer::get_loss(Blob<float> *target)
{
int batch_size = output_->n();
int output_size = output_->size();
int width = target->w();
int height = target->h();
assert(batch_size == target->n());
assert(output_size == target->size());
float *h_output, *h_target;
float loss = 0.0;
// get predicts and targets (device -> host copies)
h_output = output_->to(host);
h_target = target->to(host);
const float eps = 1e-10f; // keeps log() finite
for (int b = 0; b < batch_size; b++)
{
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
int idx = b * output_size + (i * width + j);
// clamp the prediction into (eps, 1 - eps)
float p = h_output[idx];
if (p < eps) p = eps;
else if (p > 1.f - eps) p = 1.f - eps;
loss += h_target[idx] * logf(p) + (1.f - h_target[idx]) * logf(1.f - p);
}
}
}
// negative mean over all pixels of all samples
return -loss / (static_cast<float>(batch_size) * width * height);
}
// Dumps one randomly chosen sample of the current batch to "pred.txt"
// (raw predicted probabilities) and "gdt.txt" (rounded ground truth), and
// prints the maximum predicted value of that sample to stdout.
// Fixes over the previous version:
//  - the two scratch buffers were allocated with new[] and never released
//    (leaked on every call); they are now delete[]d before returning;
//  - the sample index used rand() % (batch_size - 1), which divides by zero
//    when batch_size == 1 and could never select the last sample.
// NOTE(review): the i*height+j flattening assumes a square, single-channel
// map (output_size == width * height) -- confirm for other topologies.
void Layer::show_result(Blob<float> *target)
{
int batch_size = output_->n();
int output_size = output_->size();
assert(batch_size == target->n());
assert(output_size == target->size());
int width = output_->w();
int height = output_->h();
auto out = new float [output_size];
auto targ = new int [output_size];
float *h_output;
float *h_target;
// get predicts and targets (device -> host copies)
h_target = target->to(host);
h_output = output_->to(host);
float temp = h_output[0];
// pick a random sample of the batch (safe for batch_size == 1)
int b = (batch_size > 1) ? rand() % batch_size : 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
out[i*height + j] = h_output[b*output_size + i*height + j];
targ[i*height + j] = (int)round(h_target[b*output_size + i*height + j]);
if (h_output[b*output_size + i*height + j] > temp)
temp = h_output[b*output_size + i*height + j];
}
}
cout << temp << endl;
ofstream pred ("pred.txt");
ofstream gdt ("gdt.txt");
if (pred.is_open())
{
for (int count = 0; count < output_size; count++) {
pred << out[count] << " " ;
}
pred.close();
}
else cout << "Unable to open file";
if (gdt.is_open())
{
for (int count = 0; count < output_size; count++) {
gdt << targ[count] << " " ;
}
gdt.close();
}
else cout << "Unable to open file";
// release the scratch buffers (previously leaked on every call)
delete[] out;
delete[] targ;
}
// Counts per-pixel hits over the whole batch: a prediction < 0.5 matching a
// target of 0, or >= 0.5 matching a target of 1. Targets that are neither 0
// nor 1 are never counted.
// NOTE(review): the i*width+j indexing with i < width and j < height only
// covers the map correctly when width == height -- confirm for non-square
// outputs.
int Layer::get_accuracy(Blob<float> *target)
{
int batch_size = output_->n();
int output_size = output_->size();
int width = output_->w();
int height = output_->h();
//cout<<"out: "<<output_size<<endl;
assert(batch_size == target->n());
assert(output_size == target->size());
float *h_output, *h_target;
int hit_count = 0;
// get predicts and targets
h_output = output_->to(host);
h_target = target->to(host);
for (int b = 0; b< batch_size; b++){
for (int i =0; i<width; i++){
for (int j = 0; j< height; j++){
//cout<<h_output[b * output_size+(i*width+j)]<<endl;
if(h_output[b * output_size+(i*width+j)]<0.5 && h_target[b * output_size+(i*width+j)]==0) hit_count=hit_count+1;
else if(h_output[b * output_size+(i*width+j)]>=0.5 && h_target[b * output_size+(i*width+j)]==1) hit_count=hit_count+1;
}}}
return hit_count;
}
// Loads pretrained parameters for this layer: weights from "<name>.bin" and
// biases from "<name>.bias.bin". Returns 0 on success, -1 when the weight
// file cannot be read, -2 when the bias file cannot be read.
int Layer::load_parameter()
{
std::stringstream filename_weights, filename_biases;
filename_weights << name_ << ".bin";
filename_biases << name_ << ".bias.bin";
if (weights_->file_read(filename_weights.str()) != 0)
return -1;
if (biases_->file_read(filename_biases.str()) != 0)
return -2;
std::cout << ".. loaded " << name_ << " pretrain parameter.." << std::endl;
return 0;
}
// Persists this layer's parameters: weights to "<name>.bin" and biases to
// "<name>.bias.bin". Blobs that do not exist are skipped. Returns 0 on
// success, -1 / -2 when the corresponding file could not be written.
int Layer::save_parameter()
{
std::stringstream filename_weights, filename_biases;
std::cout << ".. saving " << name_ << " parameter ..";
if (weights_ != nullptr)
{
filename_weights << name_ << ".bin";
if (weights_->file_write(filename_weights.str()) != 0)
return -1;
}
if (biases_ != nullptr)
{
filename_biases << name_ << ".bias.bin";
if (biases_->file_write(filename_biases.str()) != 0)
return -2;
}
std::cout << " done .." << std::endl;
return 0;
}
/****************************************************************
* Dense Layer *
****************************************************************/
// Fully-connected layer; only the name and output width are fixed here.
// Input size, weights and biases are derived lazily in fwd_initialize once
// the first input blob is seen.
Dense::Dense(std::string name, int output_size)
{
name_ = name;
output_size_ = output_size;
}
// Releases the device-side ones vector used for bias broadcasting; owned
// blobs are freed by the base-class destructor.
Dense::~Dense()
{
if (d_one_vec != nullptr) { hipFree(d_one_vec); d_one_vec = nullptr; }
}
// Fills d_one_vec[0..length) with 1.0f; one thread per element with a bounds
// guard. The index is computed in size_t so the comparison against the
// size_t length is not a signed/unsigned mismatch and the
// blockIdx.x * blockDim.x product cannot overflow a 32-bit int for large
// vectors.
__global__ void init_one_vec(float* d_one_vec, size_t length)
{
size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (i >= length) return;
d_one_vec[i] = 1.f;
}
// Lazy forward-pass setup: on first call (or when the batch size changes)
// derives the input size, allocates weight/bias/output blobs, (re)builds the
// device ones-vector used for bias broadcasting, and initialises or loads
// the parameters according to the load_pretrain_/freeze_ flags.
void Dense::fwd_initialize(Blob<float> *input)
{
// initialize weights and biases
if (weights_ == nullptr)
{
// setup parameter size information
input_size_ = input->c() * input->h() * input->w();
// initialize weight, bias, and output
weights_ = new Blob<float>(1, 1, input_size_, output_size_);
biases_ = new Blob<float>(1, 1, output_size_);
}
// initilaize input and output
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(batch_size_, output_size_);
else
output_->reset(batch_size_, output_size_);
output_->tensor();
// rebuild the ones vector to match the new batch size
if (d_one_vec != nullptr)
hipFree(d_one_vec);
checkCudaErrors(hipMalloc((void**)&d_one_vec, sizeof(float) * batch_size_));
hipLaunchKernelGGL(( init_one_vec), dim3((batch_size_+BLOCK_DIM_1D-1)/BLOCK_DIM_1D), dim3(BLOCK_DIM_1D) , 0, 0, d_one_vec, batch_size_);
// initialize weights and biases
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* do nothing */
}
}
}
// Forward pass: output = W^T * x + b, computed with two GEMMs (the second
// broadcasts the bias across the batch via the device ones vector).
// NOTE(review): the GEMMs read the input_ member captured in fwd_initialize,
// not the `input` argument -- they are assumed to be the same blob.
Blob<float> *Dense::forward(Blob<float> *input)
{
// output = weights^T * input (without biases)
checkCublasErrors(
hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_T, HIPBLAS_OP_N,
output_size_, batch_size_, input_size_,
&cuda_->one,
weights_->cuda(), input_size_,
input_->cuda(), input_size_,
&cuda_->zero,
output_->cuda(), output_size_));
// output += biases * d_one_vec^T
checkCublasErrors(hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_N, HIPBLAS_OP_N,
output_size_, batch_size_, 1,
&cuda_->one,
biases_->cuda(), output_size_,
d_one_vec, 1,
&cuda_->one,
output_->cuda(), output_size_));
#if (DEBUG_DENSE & 0x01)
input_->print( name_ + "::input", true);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif // DEBUG_DENSE
return output_;
}
// Lazy backward-pass setup: allocates the gradient blobs on first use and
// (re)allocates grad_input_ whenever the batch size changes.
void Dense::bwd_initialize(Blob<float> *grad_output)
{
if (grad_weights_ == nullptr)
{
grad_weights_ = new Blob<float>(weights_->shape());
grad_biases_ = new Blob<float>(biases_->shape());
}
if (grad_input_ == nullptr || batch_size_ != grad_output->n())
{
grad_output_ = grad_output;
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
}
// Backward pass of the fully-connected layer:
//   db = dy * 1-vec (row sums over the batch),
//   dW = x * dy^T,
//   dx = W * dy (skipped when this layer is a gradient stop).
// Fix: the BLAS calls are now wrapped in checkCublasErrors, matching the
// error handling used in forward() and update_weights_biases(); failures
// were previously silently ignored.
Blob<float> *Dense::backward(Blob<float> *grad_output)
{
// db = (dy) * d_one_vec
checkCublasErrors(hipblasSgemv(cuda_->cublas(),
HIPBLAS_OP_N,
output_size_, batch_size_,
&cuda_->one,
grad_output_->cuda(), output_size_,
d_one_vec, 1,
&cuda_->zero,
grad_biases_->cuda(), 1));
// dw = x * (dy)^T
checkCublasErrors(hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_N, HIPBLAS_OP_T,
input_size_, output_size_, batch_size_,
&cuda_->one,
input_->cuda(), input_size_,
grad_output_->cuda(), output_size_,
&cuda_->zero,
grad_weights_->cuda(), input_size_));
// dx = W * dy
if (!gradient_stop_)
checkCublasErrors(hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_N, HIPBLAS_OP_N,
input_size_, batch_size_, output_size_,
&cuda_->one,
weights_->cuda(), input_size_,
grad_output_->cuda(), output_size_,
&cuda_->zero,
grad_input_->cuda(), input_size_));
#if (DEBUG_DENSE & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
grad_output->print( name_ + "::gradients", true, grad_output->n());
grad_weights_->print(name_ + "::gfilter", true);
grad_biases_->print( name_ + "::gbias", true);
if (!gradient_stop_)
grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_DENSE
return grad_input_;
}
/****************************************************************
* Activation Layer *
****************************************************************/
// Activation layer wrapping a cudnn activation descriptor (mode selects
// ReLU/sigmoid/tanh/..., coef is the mode-specific coefficient, e.g. the
// clipped-ReLU ceiling).
Activation::Activation(std::string name, cudnnActivationMode_t mode, float coef)
{
name_ = name;
act_mode_ = mode;
act_coef_ = coef;
cudnnCreateActivationDescriptor(&act_desc_);
cudnnSetActivationDescriptor(act_desc_, act_mode_, CUDNN_PROPAGATE_NAN, act_coef_);
}
// Releases the cudnn activation descriptor created in the constructor.
Activation::~Activation()
{
cudnnDestroyActivationDescriptor(act_desc_);
}
// Lazy forward setup: captures the input blob/descriptor and (re)allocates
// the same-shaped output blob whenever the batch size changes.
void Activation::fwd_initialize(Blob<float> *input)
{
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(input->shape());
else
output_->reset(input->shape());
output_desc_ = output_->tensor();
}
}
// Applies the configured cudnn activation to `input` and returns the
// layer-owned output blob.
// Fix: the cudnn call is now wrapped in checkCudnnErrors (matching
// Softmax::forward); a failing activation was previously silently ignored.
Blob<float> *Activation::forward(Blob<float> *input)
{
checkCudnnErrors(cudnnActivationForward(cuda_->cudnn(),
act_desc_,
&cuda_->one,
input_desc_,
input->cuda(),
&cuda_->zero,
output_desc_,
output_->cuda()));
return output_;
}
// Lazy backward setup: captures the upstream gradient and (re)allocates
// grad_input_ with the input's shape when the batch size changes.
void Activation::bwd_initialize(Blob<float> *grad_output)
{
if (grad_input_ == nullptr || batch_size_ != grad_output->n())
{
grad_output_ = grad_output;
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
}
// Backward pass: dx = f'(x) applied to the upstream gradient, using the
// cached forward input/output as required by cudnnActivationBackward.
// Fix: the cudnn call is now wrapped in checkCudnnErrors (matching
// Softmax::forward); a failing call was previously silently ignored.
Blob<float> *Activation::backward(Blob<float> *grad_output)
{
checkCudnnErrors(cudnnActivationBackward(cuda_->cudnn(),
act_desc_,
&cuda_->one,
output_desc_, output_->cuda(),
output_desc_, grad_output->cuda(),
input_desc_, input_->cuda(),
&cuda_->zero,
input_desc_, grad_input_->cuda()));
return grad_input_;
}
/****************************************************************
* Softmax definition *
****************************************************************/
// Softmax output layer; all buffers are allocated lazily in fwd_initialize.
Softmax::Softmax(std::string name)
{
name_ = name;
}
// No layer-specific resources; owned blobs are freed by the base class.
Softmax::~Softmax()
{
// do nothing
}
// Lazy forward setup: captures the input blob/descriptor and (re)allocates
// the same-shaped output blob whenever the batch size changes.
void Softmax::fwd_initialize(Blob<float> *input)
{
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(input->shape());
else
output_->reset(input->shape());
output_desc_ = output_->tensor();
}
}
// Forward pass: channel-wise accurate softmax via cudnn, writing into the
// layer-owned output blob.
Blob<float> *Softmax::forward(Blob<float> *input)
{
#if (DEBUG_SOFTMAX & 0x01)
std::cout << name_ << "[FORWARD]" << std::endl;
input_->print(name_ + "::input", true, input->n());
#endif
checkCudnnErrors(
cudnnSoftmaxForward(cuda_->cudnn(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&cuda_->one, input_desc_, input->cuda(),
&cuda_->zero, output_desc_, output_->cuda()));
#if (DEBUG_SOFTMAX & 0x01)
output_->print(name_ + "::output", true, input->n());
#endif
return output_;
}
// Lazy backward setup: (re)allocates grad_input_ with the input's shape
// whenever the batch size changes.
void Softmax::bwd_initialize(Blob<float> *target)
{
if (grad_input_ == nullptr || batch_size_ != target->n())
{
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
}
// Backward pass of softmax + cross-entropy: dx = (predict - target) / N.
// (Same computation as Layer::get_gradient, specialised for this layer.)
Blob<float> *Softmax::backward(Blob<float> *target)
{
// set grad_input_ as predict
checkCudaErrors(hipMemcpyAsync(grad_input_->cuda(),
output_->cuda(), output_->buf_size(),
hipMemcpyDeviceToDevice));
// set grad_input_ = predict - target
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(), target->len(),
&cuda_->minus_one, target->cuda(), 1,
grad_input_->cuda(), 1));
// normalize the grad_output by the batch size
int grad_output_size = target->n() * target->c() * target->h() * target->w();
float scale = 1.f / static_cast<float>(target->n());
checkCublasErrors(hipblasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
input_->print( name_ + "::input", true);
output_->print(name_ + "::predict", true);
target->print( name_ + "::y", true, target->n());
grad_input_->print(name_ + "::dx", true, target->n());
#endif
return grad_input_;
}
// Delegates to the layer's cross-entropy loss helper (loss_ member).
float Softmax::get_loss(Blob<float> *target)
{
return loss_.loss(output_, target);
}
// Top-1 classification accuracy: for each sample, compares the argmax of the
// prediction against the argmax of the one-hot target and counts matches.
// NOTE(review): the argmax scan is hard-coded to 10 classes -- confirm the
// output width before reusing this for other class counts.
int Softmax::get_accuracy(Blob<float> *target)
{
int batch_size = output_->n();
int output_size = output_->size();
assert(batch_size == target->n());
assert(output_size == target->size());
float *h_output, *h_target;
int idx_output, idx_target;
int hit_count = 0;
// get predicts and targets
h_output = output_->to(host);
h_target = target->to(host);
// idx_output = idx_target = 0;
for (int b = 0; b < batch_size; b++)
{
idx_output = 0;
idx_target = 0;
for (int i = 1; i < 10; i++)
{
if (h_output[b * output_size + i] > h_output[b * output_size + idx_output])
idx_output = i;
if (h_target[b * output_size + i] > h_target[b * output_size + idx_target])
idx_target = i;
}
if (idx_output == idx_target)
hit_count++;
}
return hit_count;
}
/****************************************************************
* Layer definition *
****************************************************************/
/**
* Convolutional layer with bias
*/
// 2D convolution layer (cross-correlation, FP32). Creates the cudnn filter
// and convolution descriptors; the filter shape itself is fixed later in
// fwd_initialize once the input channel count is known.
Conv2D::Conv2D(std::string name,
int out_channels,
int kernel_size,
int stride,
int padding,
int dilation):
out_channels_(out_channels),
kernel_size_(kernel_size),
stride_(stride),
padding_(padding),
dilation_(dilation)
{
name_ = name;
// create cudnn container handles
cudnnCreateFilterDescriptor(&filter_desc_);
cudnnCreateConvolutionDescriptor(&conv_desc_);
checkCudnnErrors(cudnnSetConvolution2dDescriptor(conv_desc_,
padding_, padding_, stride_, stride_, dilation_, dilation_,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// setting cudnn convolution math type
// CUDNN_DEFAULT_MATH operates convolution with FP32.
// If you use A100, CUDNN utilise tensor cores with TF32.
checkCudnnErrors(cudnnSetConvolutionMathType(conv_desc_, CUDNN_DEFAULT_MATH));
d_workspace_ = nullptr;
}
// Destroys the cudnn descriptors and frees the algorithm workspace buffer.
Conv2D::~Conv2D()
{
// distroy cudnn container resources
cudnnDestroyFilterDescriptor(filter_desc_);
cudnnDestroyConvolutionDescriptor(conv_desc_);
// terminate internal created blobs
if (d_workspace_ != nullptr) { hipFree(d_workspace_); d_workspace_ = nullptr; }
}
// Selects the convolution algorithms (forward, backward-filter,
// backward-data) and allocates one shared workspace buffer sized for the
// largest of the three. On cuDNN >= 7 the *_v7 heuristics (or exhaustive
// Find* when DEBUG_FIND_ALGO is set) return a performance-sorted list and
// the first entry is taken; older cuDNN versions fall back to the legacy
// PREFER_FASTEST query. Must be called after the tensor/filter descriptors
// are final (see fwd_initialize).
void Conv2D::set_workspace()
{
size_t temp_size = 0;
// forward
#if CUDNN_MAJOR >= 7
std::vector<cudnnConvolutionFwdAlgoPerf_t> fwd_algo_perf_results(CUDNN_CONVOLUTION_FWD_ALGO_COUNT);
std::vector<cudnnConvolutionBwdFilterAlgoPerf_t> bwd_filter_algo_perf_results(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT);
std::vector<cudnnConvolutionBwdDataAlgoPerf_t> bwd_data_algo_perf_results(CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT);
int algo_max_count;
int returnedAlgoCount = 0;
checkCudnnErrors(cudnnGetConvolutionForwardAlgorithmMaxCount(cuda_->cudnn(), &algo_max_count));
#if (DEBUG_FIND_ALGO & 1)
std::cout << this->name_ << ": Available Algorithm Count [FWD]: " << algo_max_count << std::endl;
checkCudnnErrors(cudnnFindConvolutionForwardAlgorithm(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
algo_max_count, &returnedAlgoCount, &fwd_algo_perf_results[0]));
std::cout << "returned algo_count: " << returnedAlgoCount << std::endl;
for (int i = 0; i < returnedAlgoCount; i++)
std::cout << "fwd algo[" << i << "] time: " << fwd_algo_perf_results[i].time << ", memory: " << fwd_algo_perf_results[i].memory << std::endl;
#else
checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm_v7(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
algo_max_count, &returnedAlgoCount, &fwd_algo_perf_results[0]));
#endif
// shoose the fastest algorithm
conv_fwd_algo_ = fwd_algo_perf_results[0].algo;
#else
checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_fwd_algo_));
#endif
checkCudnnErrors(cudnnGetConvolutionForwardWorkspaceSize(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
conv_fwd_algo_, &temp_size));
workspace_size_ = ::max(workspace_size_, temp_size);
// bwd - filter
#if CUDNN_MAJOR >= 7
checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cuda_->cudnn(), &algo_max_count));
#if (DEBUG_FIND_ALGO & 1)
std::cout << this->name_ << ": Available Algorithm Count [BWD-filter]: " << algo_max_count << std::endl;
checkCudnnErrors(cudnnFindConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
algo_max_count, &returnedAlgoCount, &bwd_filter_algo_perf_results[0]));
for (int i = 0; i < returnedAlgoCount; i++)
std::cout << "bwd filter algo[" << i << "] time: " << fwd_algo_perf_results[i].time << ", memory: " << fwd_algo_perf_results[i].memory << std::endl;
#else
checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm_v7(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
algo_max_count, &returnedAlgoCount, &bwd_filter_algo_perf_results[0]));
#endif
conv_bwd_filter_algo_ = bwd_filter_algo_perf_results[0].algo;
#else
checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv_bwd_filter_algo_));
#endif
checkCudnnErrors(cudnnGetConvolutionBackwardFilterWorkspaceSize(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
conv_bwd_filter_algo_, &temp_size));
workspace_size_ = ::max(workspace_size_, temp_size);
// bwd - data
#if CUDNN_MAJOR >= 7
checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cuda_->cudnn(), &algo_max_count));
#if (DEBUG_FIND_ALGO & 1)
std::cout << this->name_ << ": Available Algorithm Count [BWD-data]: " << algo_max_count << std::endl;
checkCudnnErrors(cudnnFindConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
algo_max_count, &returnedAlgoCount, &bwd_data_algo_perf_results[0]));
for (int i = 0; i < returnedAlgoCount; i++)
std::cout << "bwd data algo[" << i << "] time: " << fwd_algo_perf_results[i].time << ", memory: " << fwd_algo_perf_results[i].memory << std::endl;
#else
checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm_v7(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
algo_max_count, &returnedAlgoCount, &bwd_data_algo_perf_results[0]));
#endif
conv_bwd_data_algo_ = bwd_data_algo_perf_results[0].algo;
#else
checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &conv_bwd_data_algo_));
#endif
checkCudnnErrors(cudnnGetConvolutionBackwardDataWorkspaceSize(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
conv_bwd_data_algo_, &temp_size));
workspace_size_ = ::max(workspace_size_, temp_size);
// (re)allocate one workspace large enough for all three algorithms
if (workspace_size_ > 0)
{
if (d_workspace_ != nullptr)
checkCudaErrors(hipFree(d_workspace_));
checkCudaErrors(hipMalloc((void**)&d_workspace_, workspace_size_));
}
}
// Lazy pre-forward setup (HIP build): creates the filter/bias parameters on
// first use, and rebuilds the output blob, descriptors and cuDNN workspace
// whenever the input batch size changes. Must run before Conv2D::forward().
void Conv2D::fwd_initialize(Blob<float> *input)
{
// initialize weights and bias (first call only)
if (weights_ == nullptr)
{
// initialize container handles: filter layout is NCHW float
checkCudnnErrors(cudnnSetFilter4dDescriptor(filter_desc_,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
out_channels_, input->c(), kernel_size_, kernel_size_));
weights_ = new Blob<float>(out_channels_, input->c(), kernel_size_, kernel_size_);
biases_ = new Blob<float>(1, out_channels_); // bias size
bias_desc_ = biases_->tensor();
}
// initialize input and output (first call, or batch size changed)
if (input_ == nullptr || batch_size_ != input->n())
{
// initialize input
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
// initialize output: let cuDNN compute the output N/C/H/W
checkCudnnErrors(cudnnGetConvolution2dForwardOutputDim(
conv_desc_, input_desc_, filter_desc_,
&output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));
if (output_ == nullptr)
output_ = new Blob<float>(output_size_);
else
output_->reset(output_size_);
output_desc_ = output_->tensor();
// (re)select conv algorithms and size the shared cudnn workspace
set_workspace();
// initialize weights: load from disk, randomize, or keep frozen values
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* do nothing */
}
}
}
// Forward convolution: output = conv(input, W) + b, using the algorithm and
// workspace selected in set_workspace().
// NOTE(review): the `input` parameter is ignored here in favor of the cached
// `input_` — assumes fwd_initialize() was called with the same blob; confirm.
Blob<float> *Conv2D::forward(Blob<float> *input)
{
checkCudnnErrors(cudnnConvolutionForward(cuda_->cudnn(),
&cuda_->one, input_desc_, input_->cuda(),
filter_desc_, weights_->cuda(), conv_desc_, conv_fwd_algo_, d_workspace_, workspace_size_,
&cuda_->zero, output_desc_, output_->cuda()));
// broadcast-add the per-channel bias onto the convolution result
checkCudnnErrors(cudnnAddTensor(cuda_->cudnn(),
&cuda_->one, bias_desc_, biases_->cuda(),
&cuda_->one, output_desc_, output_->cuda()));
#if (DEBUG_CONV & 0x01)
input_->print( name_ + "::input", true, input_->n(), 28);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif
return output_;
}
// Pre-backward setup: parameter-gradient blobs are created once (matching the
// parameter shapes); the input-gradient blob is (re)allocated whenever the
// incoming batch size changes.
void Conv2D::bwd_initialize(Blob<float> *grad_output)
{
    if (grad_weights_ == nullptr)
    {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_  = new Blob<float>(1, biases_->c());
    }

    // nothing further to do unless this is the first call or the batch grew/shrank
    if (grad_input_ != nullptr && batch_size_ == grad_output->n())
        return;

    grad_output_ = grad_output;

    if (grad_input_ == nullptr)
        grad_input_ = new Blob<float>(input_->shape());
    else
        grad_input_->reset(input_->shape());
}
// Back-propagation: computes db, dW and (unless gradient_stop_) dx.
// NOTE(review): the weight gradient reads the cached `grad_output_` member
// while the bias/data gradients use the `grad_output` parameter — assumed to
// be the same blob after bwd_initialize(); confirm at call sites.
Blob<float> *Conv2D::backward(Blob<float> *grad_output)
{
// gradients of biases: db = sum of dy over N,H,W
checkCudnnErrors(
cudnnConvolutionBackwardBias(cuda_->cudnn(),
&cuda_->one,
output_desc_, grad_output->cuda(),
&cuda_->zero,
bias_desc_, grad_biases_->cuda()));
// gradients of weights: dW from cached input and dy
checkCudnnErrors(
cudnnConvolutionBackwardFilter(cuda_->cudnn(),
&cuda_->one,
input_desc_, input_->cuda(),
output_desc_, grad_output_->cuda(),
conv_desc_, conv_bwd_filter_algo_, d_workspace_, workspace_size_,
&cuda_->zero,
filter_desc_, grad_weights_->cuda()));
// gradients of input data (skipped for the first layer via gradient_stop_)
if (!gradient_stop_)
checkCudnnErrors(
cudnnConvolutionBackwardData(cuda_->cudnn(),
&cuda_->one,
filter_desc_, weights_->cuda(),
output_desc_, grad_output->cuda(),
conv_desc_, conv_bwd_data_algo_, d_workspace_, workspace_size_,
&cuda_->zero,
input_desc_, grad_input_->cuda()));
#if (DEBUG_CONV & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
grad_output->print( name_ + "::gradients", true);
grad_biases_->print(name_ + "gbias", true);
grad_weights_->print(name_+ "gfilter", true);
if (!gradient_stop_)
grad_input_->print(name_+"gdata", true);
#endif
#if (DEBUG_CONV & 0x04)
grad_output->print( name_ + "::gradients", true);
grad_biases_->print( name_ + "::gbias", true);
#endif
return grad_input_;
}
/****************************************************************
* Layer definition *
****************************************************************/
// Pooling layer (HIP build): wraps a cuDNN 2-D pooling descriptor.
// `mode` selects max/average pooling; kernel, padding and stride are square.
// Fix: the descriptor create/set calls previously ignored their cuDNN status;
// they are now routed through checkCudnnErrors like the rest of the file.
Pooling::Pooling(std::string name,
                 int kernel_size,
                 int padding,
                 int stride,
                 cudnnPoolingMode_t mode):
    kernel_size_(kernel_size),
    padding_(padding),
    stride_(stride),
    mode_(mode)
{
    name_ = name;

    checkCudnnErrors(cudnnCreatePoolingDescriptor(&pool_desc_));
    checkCudnnErrors(cudnnSetPooling2dDescriptor(pool_desc_, mode_, CUDNN_PROPAGATE_NAN,
        kernel_size_, kernel_size_, padding_, padding_, stride_, stride_));
}
// Releases the cuDNN pooling descriptor owned by this layer.
Pooling::~Pooling()
{
cudnnDestroyPoolingDescriptor(pool_desc_);
}
// Caches the input blob/descriptor and sizes the output blob to the pooled
// dimensions reported by cuDNN. No-op unless the batch size changed.
void Pooling::fwd_initialize(Blob<float> *input)
{
    if (input_ != nullptr && batch_size_ == input->n())
        return;

    input_      = input;
    input_desc_ = input_->tensor();
    batch_size_ = input->n();

    // ask cuDNN for the pooled output N/C/H/W
    cudnnGetPooling2dForwardOutputDim(pool_desc_, input_desc_,
        &output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]);

    if (output_ == nullptr)
        output_ = new Blob<float>(output_size_);
    else
        output_->reset(output_size_);

    output_desc_ = output_->tensor();
}
// Forward pooling: output = pool(input) per the configured descriptor.
// Fix: the cuDNN status was previously discarded; Pooling::backward already
// wraps its call in checkCudnnErrors, so do the same here for consistency.
Blob<float> *Pooling::forward(Blob<float> *input)
{
    checkCudnnErrors(cudnnPoolingForward(cuda_->cudnn(), pool_desc_,
        &cuda_->one,  input_desc_,  input_->cuda(),
        &cuda_->zero, output_desc_, output_->cuda()));

    return output_;
}
// Pre-backward setup: (re)allocates the input-gradient blob when needed.
void Pooling::bwd_initialize(Blob<float> *grad_output)
{
    // already sized for this batch — nothing to do
    if (grad_input_ != nullptr && batch_size_ == grad_output->n())
        return;

    grad_output_ = grad_output;

    if (grad_input_ == nullptr)
        grad_input_ = new Blob<float>(input_->shape());
    else
        grad_input_->reset(input_->shape());
}
// Back-propagation through pooling: routes dy back to the contributing input
// positions. cuDNN needs both the forward output and input tensors here.
Blob<float> *Pooling::backward(Blob<float> *grad_output)
{
checkCudnnErrors(
cudnnPoolingBackward(cuda_->cudnn(), pool_desc_,
&cuda_->one,
output_desc_, output_->cuda(),
output_desc_, grad_output->cuda(),
input_desc_, input_->cuda(),
&cuda_->zero,
input_desc_, grad_input_->cuda()));
return grad_input_;
}
| ff6603763c03d946f1f555372d53301661721b50.cu | #include "layer.h"
#include <random>
#include <cuda_runtime.h>
#include <curand.h>
#include <cassert>
#include <math.h>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <iostream>
#include <cstring>
using namespace cudl;
using namespace std;
/****************************************************************
* Layer definition *
****************************************************************/
// Base-class constructor; all members are initialized in-class / by subclasses.
Layer::Layer()
{
/* do nothing */
}
// Frees the blobs this layer owns. Note: `input_` and `grad_output_` are
// borrowed from neighboring layers and are deliberately not deleted here.
Layer::~Layer()
{
#if (DEBUG_FORWARD > 0 || DEBUG_BACKWARD > 0)
std::cout << "Destroy Layer: " << name_ << std::endl;
#endif
if (output_ != nullptr) { delete output_; output_ = nullptr; }
if (grad_input_ != nullptr) { delete grad_input_; grad_input_ = nullptr; }
if (weights_ != nullptr) { delete weights_; weights_ = nullptr; }
if (biases_ != nullptr) { delete biases_; biases_ = nullptr; }
if (grad_weights_ != nullptr) { delete grad_weights_; grad_weights_ = nullptr; }
if (grad_biases_ != nullptr) { delete grad_biases_; grad_biases_ = nullptr; }
}
// He-style uniform initialization of the weights (biases set to 0) on the
// host, then uploaded to the device. seed == 0 draws a nondeterministic seed.
void Layer::init_weight_bias(unsigned int seed)
{
// make sure no kernel is still touching the parameter buffers
checkCudaErrors(cudaDeviceSynchronize());
if (weights_ == nullptr || biases_ == nullptr)
return;
// Create random network
std::random_device rd;
std::mt19937 gen(seed == 0 ? rd() : static_cast<unsigned int>(seed));
// He uniform distribution; fan-in taken as the full input blob size
float range = sqrt(6.f / input_->size());	// He's initialization
std::uniform_real_distribution<> dis(-range, range);
for (int i = 0; i < weights_->len(); i++)
weights_->ptr()[i] = static_cast<float>(dis(gen));
for (int i = 0; i < biases_->len(); i++)
biases_->ptr()[i] = 0.f;
// copy initialized value to the device
weights_->to(DeviceType::cuda);
biases_->to(DeviceType::cuda);
std::cout << ".. initialized " << name_ << " layer .." << std::endl;
}
// Vanilla SGD step: p <- p - learning_rate * dp for weights and biases,
// implemented as cublasSaxpy with alpha = -learning_rate.
void Layer::update_weights_biases(float learning_rate)
{
float eps = -1.f * learning_rate;
if (weights_ != nullptr && grad_weights_ != nullptr)
{
#if (DEBUG_UPDATE)
weights_->print(name_ + "::weights (before update)", true);
grad_weights_->print(name_ + "::gweights", true);
#endif // DEBUG_UPDATE
// w = w + eps * dw
checkCublasErrors(
cublasSaxpy(cuda_->cublas(),
weights_->len(),
&eps,
grad_weights_->cuda(), 1,
weights_->cuda(), 1));
#if (DEBUG_UPDATE)
weights_->print(name_ + "weights (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
if (biases_ != nullptr && grad_biases_ != nullptr)
{
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (before update)", true);
grad_biases_->print(name_ + "gbiases", true);
#endif // DEBUG_UPDATE
// b = b + eps * db
checkCublasErrors(
cublasSaxpy(cuda_->cublas(),
biases_->len(),
&eps,
grad_biases_->cuda(), 1,
biases_->cuda(), 1));
#if (DEBUG_UPDATE)
biases_->print(name_ + "biases (after update)", true);
// getchar();
#endif // DEBUG_UPDATE
}
}
// Loss gradient for a (sigmoid/softmax + cross-entropy)-style output:
// grad_input_ = (predict - target) / batch_size, computed on the device.
Blob<float> *Layer::get_gradient(Blob<float> *target)
{
// set grad_input_ as predict
checkCudaErrors(cudaMemcpyAsync(grad_input_->cuda(),
output_->cuda(), output_->buf_size(),
cudaMemcpyDeviceToDevice));
// set grad_input_ = predict - target
checkCublasErrors(
cublasSaxpy(cuda_->cublas(), target->len(),
&cuda_->minus_one, target->cuda(), 1,
grad_input_->cuda(), 1));
// normalize the grad_output by the batch size
int grad_output_size = target->n() * target->c() * target->h() * target->w();
float scale = 1.f / static_cast<float>(target->n());
checkCublasErrors(cublasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
input_->print( name_ + "::input", true);
output_->print(name_ + "::predict", true);
target->print( name_ + "::y", true, target->n());
grad_input_->print(name_ + "::dx", true, target->n());
#endif
return grad_input_;
}
// Mean binary cross-entropy over every pixel of every batch element.
// Predictions are clamped away from {0, 1} so log() stays finite.
// Fix: the sum was previously divided by a hard-coded 12544 (= 112*112),
// which silently mis-scales the loss for any other output resolution;
// it now divides by the actual number of summed terms (width * height).
float Layer::get_loss(Blob<float> *target)
{
    int batch_size = output_->n();
    int output_size = output_->size();
    int width = target->w();
    int height = target->h();

    assert(batch_size == target->n());
    assert(output_size == target->size());

    float *h_output, *h_target;
    float loss = 0.0;

    // get predicts and targets on the host
    h_output = output_->to(host);
    h_target = target->to(host);

    for (int b = 0; b < batch_size; b++)
    {
        for (int i = 0; i < width; i++) {
            for (int j = 0; j < height; j++) {
                int idx = b * output_size + (i * width + j);

                // clamp to avoid log(0)
                if (h_output[idx] == 0.0) {
                    h_output[idx] = 0.0000000001;
                }
                else if (h_output[idx] == 1.0) {
                    h_output[idx] = 0.9999999999;
                }

                loss = loss + h_target[idx] * log(h_output[idx])
                            + (1.0 - h_target[idx]) * log(1.0 - h_output[idx]);
            }
        }
    }

    // average over batch * width * height summed terms
    return -loss / (batch_size * (double)width * (double)height);
}
// Dumps one randomly chosen batch sample to "pred.txt" (raw predictions) and
// "gdt.txt" (targets rounded to int), and prints that sample's maximum
// prediction value.
// Fixes: the scratch buffers were leaked on every call (now delete[]'d), and
// `rand() % (batch_size - 1)` performed a modulo by zero when batch_size == 1.
void Layer::show_result(Blob<float> *target)
{
    int batch_size = output_->n();
    int output_size = output_->size();

    assert(batch_size == target->n());
    assert(output_size == target->size());

    int width = output_->w();
    int height = output_->h();

    auto out = new float [output_size];
    auto targ = new int [output_size];

    float *h_output;
    float *h_target;

    // fetch predictions and targets back to host memory
    h_target = target->to(host);
    h_output = output_->to(host);

    float temp = h_output[0];
    // random batch element; guarded so a single-sample batch picks index 0
    int b = (batch_size > 1) ? rand() % (batch_size - 1) : 0;

    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            // NOTE(review): index i*height + j with i < height, j < width is
            // only consistent for square outputs — confirm upstream shapes.
            out [i*height + j] = h_output[b*output_size + i*height + j];
            targ[i*height + j] = (int)round(h_target[b*output_size + i*height + j]);
            if (h_output[b*output_size + i*height + j] > temp)
                temp = h_output[b*output_size + i*height + j];
        }
    }
    cout << temp << endl;

    ofstream pred ("pred.txt");
    ofstream gdt ("gdt.txt");
    if (pred.is_open())
    {
        for (int count = 0; count < output_size; count++) {
            pred << out[count] << " " ;
        }
        pred.close();
    }
    else cout << "Unable to open file";
    if (gdt.is_open())
    {
        for (int count = 0; count < output_size; count++) {
            gdt << targ[count] << " " ;
        }
        gdt.close();
    }
    else cout << "Unable to open file";

    // release scratch buffers (previously leaked)
    delete [] out;
    delete [] targ;
}
// Pixel-wise binary accuracy: a position counts as a hit when the prediction,
// thresholded at 0.5, matches the 0/1 label. Returns the number of hits over
// the whole batch.
int Layer::get_accuracy(Blob<float> *target)
{
    int batch_size = output_->n();
    int output_size = output_->size();
    int width = output_->w();
    int height = output_->h();

    assert(batch_size == target->n());
    assert(output_size == target->size());

    // bring predictions and labels back to the host
    float *pred = output_->to(host);
    float *label = target->to(host);

    int hit_count = 0;
    for (int b = 0; b < batch_size; b++) {
        for (int i = 0; i < width; i++) {
            for (int j = 0; j < height; j++) {
                int idx = b * output_size + (i * width + j);
                bool predicted_on = (pred[idx] >= 0.5);
                if (predicted_on ? (label[idx] == 1) : (label[idx] == 0))
                    hit_count = hit_count + 1;
            }
        }
    }

    return hit_count;
}
// Loads pretrained parameters from "<name>.bin" (weights) and
// "<name>.bias.bin" (biases). Returns 0 on success, -1 if the weight file
// fails to read, -2 if the bias file fails to read.
int Layer::load_parameter()
{
    std::stringstream weight_path, bias_path;
    weight_path << name_ << ".bin";
    bias_path << name_ << ".bias.bin";

    if (weights_->file_read(weight_path.str()))
        return -1;

    if (biases_->file_read(bias_path.str()))
        return -2;

    std::cout << ".. loaded " << name_ << " pretrain parameter.." << std::endl;

    return 0;
}
// Writes current parameters to "<name>.bin" / "<name>.bias.bin".
// Returns 0 on success, -1 on a failed weight write, -2 on a failed bias write.
// Layers without weights or biases simply skip the corresponding file.
int Layer::save_parameter()
{
    std::stringstream weight_path, bias_path;

    std::cout << ".. saving " << name_ << " parameter ..";

    if (weights_)
    {
        weight_path << name_ << ".bin";
        if (weights_->file_write(weight_path.str()))
            return -1;
    }

    if (biases_)
    {
        bias_path << name_ << ".bias.bin";
        if (biases_->file_write(bias_path.str()))
            return -2;
    }

    std::cout << " done .." << std::endl;

    return 0;
}
/****************************************************************
* Dense Layer *
****************************************************************/
// Fully-connected layer producing `output_size` features per sample;
// parameter blobs are allocated lazily in fwd_initialize().
Dense::Dense(std::string name, int output_size)
{
name_ = name;
output_size_ = output_size;
}
// Frees the device-side ones vector; blobs are released by ~Layer().
Dense::~Dense()
{
if (d_one_vec != nullptr) { cudaFree(d_one_vec); d_one_vec = nullptr; }
}
// Fills d_one_vec[0..length) with 1.0f, one element per thread.
// Launch with at least `length` total threads; the tail guard handles the
// case where length is not a multiple of the block size.
// Fix: the index was an `int` compared against a `size_t` length
// (signed/unsigned mismatch and overflow for very large vectors); it is now
// computed in size_t throughout.
__global__ void init_one_vec(float* d_one_vec, size_t length)
{
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;

    if (i >= length) return;

    d_one_vec[i] = 1.f;
}
// Lazy pre-forward setup: allocates W [input_size x output_size] and b
// [output_size] on first use; rebuilds the output blob and the device-side
// vector of ones (used for bias broadcast) whenever the batch size changes.
void Dense::fwd_initialize(Blob<float> *input)
{
// initialize weights and biases (first call only)
if (weights_ == nullptr)
{
// setup parameter size information: flatten C*H*W into one feature axis
input_size_ = input->c() * input->h() * input->w();
// initialize weight, bias, and output
weights_ = new Blob<float>(1, 1, input_size_, output_size_);
biases_ = new Blob<float>(1, 1, output_size_);
}
// initialize input and output (first call, or batch size changed)
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(batch_size_, output_size_);
else
output_->reset(batch_size_, output_size_);
output_->tensor();
// rebuild the length-batch_size vector of ones on the device
if (d_one_vec != nullptr)
cudaFree(d_one_vec);
checkCudaErrors(cudaMalloc((void**)&d_one_vec, sizeof(float) * batch_size_));
init_one_vec<<< (batch_size_+BLOCK_DIM_1D-1)/BLOCK_DIM_1D, BLOCK_DIM_1D >>>(d_one_vec, batch_size_);
// initialize weights and biases: load from disk, randomize, or keep frozen
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* do nothing */
}
}
}
// Forward pass: output = W^T * x + b, as two cuBLAS GEMMs (column-major).
// NOTE(review): the `input` parameter is ignored in favor of the cached
// `input_` set by fwd_initialize() — assumed to be the same blob; confirm.
Blob<float> *Dense::forward(Blob<float> *input)
{
// output = weights^T * input (without biases)
checkCublasErrors(
cublasSgemm(cuda_->cublas(),
CUBLAS_OP_T, CUBLAS_OP_N,
output_size_, batch_size_, input_size_,
&cuda_->one,
weights_->cuda(), input_size_,
input_->cuda(), input_size_,
&cuda_->zero,
output_->cuda(), output_size_));
// output += biases * d_one_vec^T (rank-1 bias broadcast over the batch)
checkCublasErrors(cublasSgemm(cuda_->cublas(),
CUBLAS_OP_N, CUBLAS_OP_N,
output_size_, batch_size_, 1,
&cuda_->one,
biases_->cuda(), output_size_,
d_one_vec, 1,
&cuda_->one,
output_->cuda(), output_size_));
#if (DEBUG_DENSE & 0x01)
input_->print( name_ + "::input", true);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif // DEBUG_DENSE
return output_;
}
// Pre-backward setup: parameter-gradient blobs are created once; the
// input-gradient blob is (re)allocated when the batch size changes.
void Dense::bwd_initialize(Blob<float> *grad_output)
{
    if (grad_weights_ == nullptr)
    {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_  = new Blob<float>(biases_->shape());
    }

    // already sized for this batch — nothing more to do
    if (grad_input_ != nullptr && batch_size_ == grad_output->n())
        return;

    grad_output_ = grad_output;

    if (grad_input_ == nullptr)
        grad_input_ = new Blob<float>(input_->shape());
    else
        grad_input_->reset(input_->shape());
}
// Back-propagation for the fully-connected layer (cuBLAS, column-major):
//   db = dy * 1-vec   (bias gradient summed over the batch)
//   dW = x * dy^T     (weight gradient)
//   dx = W * dy       (input gradient; skipped when gradient_stop_)
// Fix: the cuBLAS status codes were previously discarded here while
// Dense::forward checks them — all three calls now go through
// checkCublasErrors for consistency.
Blob<float> *Dense::backward(Blob<float> *grad_output)
{
    // db = (dy) * d_one_vec
    checkCublasErrors(cublasSgemv(cuda_->cublas(),
            CUBLAS_OP_N,
            output_size_, batch_size_,
            &cuda_->one,
            grad_output_->cuda(), output_size_,
            d_one_vec, 1,
            &cuda_->zero,
            grad_biases_->cuda(), 1));

    // dw = x * (dy)^T
    checkCublasErrors(cublasSgemm(cuda_->cublas(),
        CUBLAS_OP_N, CUBLAS_OP_T,
        input_size_, output_size_, batch_size_,
        &cuda_->one,
        input_->cuda(), input_size_,
        grad_output_->cuda(), output_size_,
        &cuda_->zero,
        grad_weights_->cuda(), input_size_));

    // dx = W * dy (not needed for the first layer)
    if (!gradient_stop_)
        checkCublasErrors(cublasSgemm(cuda_->cublas(),
            CUBLAS_OP_N, CUBLAS_OP_N,
            input_size_, batch_size_, output_size_,
            &cuda_->one,
            weights_->cuda(), input_size_,
            grad_output_->cuda(), output_size_,
            &cuda_->zero,
            grad_input_->cuda(), input_size_));

#if (DEBUG_DENSE & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print( name_ + "::gradients", true, grad_output->n());
    grad_weights_->print(name_ + "::gfilter", true);
    grad_biases_->print( name_ + "::gbias", true);
    if (!gradient_stop_)
        grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_DENSE

    return grad_input_;
}
/****************************************************************
* Activation Layer *
****************************************************************/
// Element-wise activation layer backed by a cuDNN activation descriptor.
// `coef` is the cuDNN activation coefficient (e.g. clipped-ReLU ceiling).
Activation::Activation(std::string name, cudnnActivationMode_t mode, float coef)
{
name_ = name;
act_mode_ = mode;
act_coef_ = coef;
cudnnCreateActivationDescriptor(&act_desc_);
cudnnSetActivationDescriptor(act_desc_, act_mode_, CUDNN_PROPAGATE_NAN, act_coef_);
}
// Releases the cuDNN activation descriptor owned by this layer.
Activation::~Activation()
{
cudnnDestroyActivationDescriptor(act_desc_);
}
// Caches the input blob/descriptor and sizes the output to match the input
// shape. No-op unless this is the first call or the batch size changed.
void Activation::fwd_initialize(Blob<float> *input)
{
    if (input_ != nullptr && batch_size_ == input->n())
        return;

    input_      = input;
    input_desc_ = input->tensor();
    batch_size_ = input->n();

    if (output_ == nullptr)
        output_ = new Blob<float>(input->shape());
    else
        output_->reset(input->shape());

    output_desc_ = output_->tensor();
}
// Applies the configured activation element-wise: output = act(input).
// Fix: the cuDNN status was previously discarded; it is now routed through
// checkCudnnErrors, consistent with the rest of the file.
Blob<float> *Activation::forward(Blob<float> *input)
{
    checkCudnnErrors(cudnnActivationForward(cuda_->cudnn(),
        act_desc_,
        &cuda_->one,
        input_desc_,
        input->cuda(),
        &cuda_->zero,
        output_desc_,
        output_->cuda()));

    return output_;
}
// Pre-backward setup: (re)allocates the input-gradient blob when needed.
void Activation::bwd_initialize(Blob<float> *grad_output)
{
    // already sized for this batch — nothing to do
    if (grad_input_ != nullptr && batch_size_ == grad_output->n())
        return;

    grad_output_ = grad_output;

    if (grad_input_ == nullptr)
        grad_input_ = new Blob<float>(input_->shape());
    else
        grad_input_->reset(input_->shape());
}
// Back-propagation through the activation: dx = act'(x) * dy, using the
// cached forward input/output tensors cuDNN requires.
// Fix: the cuDNN status was previously discarded; now checked for
// consistency with the rest of the file.
Blob<float> *Activation::backward(Blob<float> *grad_output)
{
    checkCudnnErrors(cudnnActivationBackward(cuda_->cudnn(),
        act_desc_,
        &cuda_->one,
        output_desc_, output_->cuda(),
        output_desc_, grad_output->cuda(),
        input_desc_, input_->cuda(),
        &cuda_->zero,
        input_desc_, grad_input_->cuda()));

    return grad_input_;
}
/****************************************************************
* Softmax definition *
****************************************************************/
// Softmax output layer; descriptors are created lazily in fwd_initialize().
Softmax::Softmax(std::string name)
{
name_ = name;
}
// No layer-specific resources; blobs are released by ~Layer().
Softmax::~Softmax()
{
// do nothing
}
// Caches the input blob/descriptor and sizes the output to the input shape.
// No-op unless this is the first call or the batch size changed.
void Softmax::fwd_initialize(Blob<float> *input)
{
    if (input_ != nullptr && batch_size_ == input->n())
        return;

    input_      = input;
    input_desc_ = input->tensor();
    batch_size_ = input->n();

    if (output_ == nullptr)
        output_ = new Blob<float>(input->shape());
    else
        output_->reset(input->shape());

    output_desc_ = output_->tensor();
}
// Softmax over the channel dimension (CUDNN_SOFTMAX_MODE_CHANNEL) using the
// numerically safer CUDNN_SOFTMAX_ACCURATE algorithm.
Blob<float> *Softmax::forward(Blob<float> *input)
{
#if (DEBUG_SOFTMAX & 0x01)
std::cout << name_ << "[FORWARD]" << std::endl;
input_->print(name_ + "::input", true, input->n());
#endif
checkCudnnErrors(
cudnnSoftmaxForward(cuda_->cudnn(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&cuda_->one, input_desc_, input->cuda(),
&cuda_->zero, output_desc_, output_->cuda()));
#if (DEBUG_SOFTMAX & 0x01)
output_->print(name_ + "::output", true, input->n());
#endif
return output_;
}
// Pre-backward setup: (re)allocates the gradient blob that backward() fills
// with (predict - target) / batch_size.
void Softmax::bwd_initialize(Blob<float> *target)
{
    // already sized for this batch — nothing to do
    if (grad_input_ != nullptr && batch_size_ == target->n())
        return;

    if (grad_input_ == nullptr)
        grad_input_ = new Blob<float>(input_->shape());
    else
        grad_input_->reset(input_->shape());
}
// Combined softmax + cross-entropy gradient:
// grad_input_ = (predict - target) / batch_size, computed on the device.
Blob<float> *Softmax::backward(Blob<float> *target)
{
// set grad_input_ as predict
checkCudaErrors(cudaMemcpyAsync(grad_input_->cuda(),
output_->cuda(), output_->buf_size(),
cudaMemcpyDeviceToDevice));
// set grad_input_ = predict - target
checkCublasErrors(
cublasSaxpy(cuda_->cublas(), target->len(),
&cuda_->minus_one, target->cuda(), 1,
grad_input_->cuda(), 1));
// normalize the grad_output by the batch size
int grad_output_size = target->n() * target->c() * target->h() * target->w();
float scale = 1.f / static_cast<float>(target->n());
checkCublasErrors(cublasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
input_->print( name_ + "::input", true);
output_->print(name_ + "::predict", true);
target->print( name_ + "::y", true, target->n());
grad_input_->print(name_ + "::dx", true, target->n());
#endif
return grad_input_;
}
// Delegates to the cross-entropy loss helper on the current predictions.
float Softmax::get_loss(Blob<float> *target)
{
return loss_.loss(output_, target);
}
// Classification accuracy: counts batch samples whose predicted argmax class
// matches the argmax of the (one-hot) target.
// Fix: the argmax loop was hard-coded to 10 classes (MNIST); it now iterates
// over all `output_size` classes, which is identical for 10-class outputs and
// correct for any other class count.
int Softmax::get_accuracy(Blob<float> *target)
{
    int batch_size = output_->n();
    int output_size = output_->size();

    assert(batch_size == target->n());
    assert(output_size == target->size());

    float *h_output, *h_target;
    int idx_output, idx_target;
    int hit_count = 0;

    // get predicts and targets on the host
    h_output = output_->to(host);
    h_target = target->to(host);

    for (int b = 0; b < batch_size; b++)
    {
        idx_output = 0;
        idx_target = 0;

        // argmax over the class dimension
        for (int i = 1; i < output_size; i++)
        {
            if (h_output[b * output_size + i] > h_output[b * output_size + idx_output])
                idx_output = i;
            if (h_target[b * output_size + i] > h_target[b * output_size + idx_target])
                idx_target = i;
        }

        if (idx_output == idx_target)
            hit_count++;
    }

    return hit_count;
}
/****************************************************************
* Layer definition *
****************************************************************/
/**
* Convolutional layer with bias
*/
/**
 * Convolutional layer with bias.
 * Square kernel/stride/padding/dilation; the convolution is configured as
 * cross-correlation in FP32. Algorithm selection and workspace allocation
 * happen later in set_workspace().
 */
Conv2D::Conv2D(std::string name,
int out_channels,
int kernel_size,
int stride,
int padding,
int dilation):
out_channels_(out_channels),
kernel_size_(kernel_size),
stride_(stride),
padding_(padding),
dilation_(dilation)
{
name_ = name;
// create cudnn container handles
cudnnCreateFilterDescriptor(&filter_desc_);
cudnnCreateConvolutionDescriptor(&conv_desc_);
checkCudnnErrors(cudnnSetConvolution2dDescriptor(conv_desc_,
padding_, padding_, stride_, stride_, dilation_, dilation_,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// setting cudnn convolution math type
// CUDNN_DEFAULT_MATH operates convolution with FP32.
// If you use A100, CUDNN utilise tensor cores with TF32.
checkCudnnErrors(cudnnSetConvolutionMathType(conv_desc_, CUDNN_DEFAULT_MATH));
d_workspace_ = nullptr;
}
// Releases the cuDNN descriptors and the device workspace owned by this
// layer; parameter/gradient blobs are released by ~Layer().
Conv2D::~Conv2D()
{
// destroy cudnn container resources
cudnnDestroyFilterDescriptor(filter_desc_);
cudnnDestroyConvolutionDescriptor(conv_desc_);
// terminate internally created buffers
if (d_workspace_ != nullptr) { cudaFree(d_workspace_); d_workspace_ = nullptr; }
}
// Selects the fastest forward / backward-filter / backward-data convolution
// algorithms for the current descriptors and allocates one device workspace
// large enough for all three. Re-run whenever the tensor descriptors change
// (see fwd_initialize()).
// Fix: the DEBUG_FIND_ALGO dumps for the two backward passes printed
// `fwd_algo_perf_results` instead of the backward perf arrays (copy-paste
// bug); they now print the arrays that were actually queried.
void Conv2D::set_workspace()
{
    size_t temp_size = 0;

    // forward
#if CUDNN_MAJOR >= 7
    std::vector<cudnnConvolutionFwdAlgoPerf_t>       fwd_algo_perf_results(CUDNN_CONVOLUTION_FWD_ALGO_COUNT);
    std::vector<cudnnConvolutionBwdFilterAlgoPerf_t> bwd_filter_algo_perf_results(CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT);
    std::vector<cudnnConvolutionBwdDataAlgoPerf_t>   bwd_data_algo_perf_results(CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT);

    int algo_max_count;
    int returnedAlgoCount = 0;

    checkCudnnErrors(cudnnGetConvolutionForwardAlgorithmMaxCount(cuda_->cudnn(), &algo_max_count));
#if (DEBUG_FIND_ALGO & 1)
    std::cout << this->name_ << ": Available Algorithm Count [FWD]: " << algo_max_count << std::endl;
    checkCudnnErrors(cudnnFindConvolutionForwardAlgorithm(cuda_->cudnn(),
        input_desc_, filter_desc_, conv_desc_, output_desc_,
        algo_max_count, &returnedAlgoCount, &fwd_algo_perf_results[0]));
    std::cout << "returned algo_count: " << returnedAlgoCount << std::endl;
    for (int i = 0; i < returnedAlgoCount; i++)
        std::cout << "fwd algo[" << i << "] time: " << fwd_algo_perf_results[i].time << ", memory: " << fwd_algo_perf_results[i].memory << std::endl;
#else
    checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm_v7(cuda_->cudnn(),
        input_desc_, filter_desc_, conv_desc_, output_desc_,
        algo_max_count, &returnedAlgoCount, &fwd_algo_perf_results[0]));
#endif
    // choose the fastest algorithm (perf results are sorted by time)
    conv_fwd_algo_ = fwd_algo_perf_results[0].algo;
#else
    checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm(cuda_->cudnn(),
        input_desc_, filter_desc_, conv_desc_, output_desc_,
        CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_fwd_algo_));
#endif
    checkCudnnErrors(cudnnGetConvolutionForwardWorkspaceSize(cuda_->cudnn(),
        input_desc_, filter_desc_, conv_desc_, output_desc_,
        conv_fwd_algo_, &temp_size));
    workspace_size_ = std::max(workspace_size_, temp_size);

    // bwd - filter
#if CUDNN_MAJOR >= 7
    checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cuda_->cudnn(), &algo_max_count));
#if (DEBUG_FIND_ALGO & 1)
    std::cout << this->name_ << ": Available Algorithm Count [BWD-filter]: " << algo_max_count << std::endl;
    checkCudnnErrors(cudnnFindConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
        input_desc_, output_desc_, conv_desc_, filter_desc_,
        algo_max_count, &returnedAlgoCount, &bwd_filter_algo_perf_results[0]));
    // BUG FIX: previously dumped fwd_algo_perf_results here
    for (int i = 0; i < returnedAlgoCount; i++)
        std::cout << "bwd filter algo[" << i << "] time: " << bwd_filter_algo_perf_results[i].time << ", memory: " << bwd_filter_algo_perf_results[i].memory << std::endl;
#else
    checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm_v7(cuda_->cudnn(),
        input_desc_, output_desc_, conv_desc_, filter_desc_,
        algo_max_count, &returnedAlgoCount, &bwd_filter_algo_perf_results[0]));
#endif
    conv_bwd_filter_algo_ = bwd_filter_algo_perf_results[0].algo;
#else
    checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
        input_desc_, output_desc_, conv_desc_, filter_desc_,
        CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv_bwd_filter_algo_));
#endif
    checkCudnnErrors(cudnnGetConvolutionBackwardFilterWorkspaceSize(cuda_->cudnn(),
        input_desc_, output_desc_, conv_desc_, filter_desc_,
        conv_bwd_filter_algo_, &temp_size));
    workspace_size_ = std::max(workspace_size_, temp_size);

    // bwd - data
#if CUDNN_MAJOR >= 7
    checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cuda_->cudnn(), &algo_max_count));
#if (DEBUG_FIND_ALGO & 1)
    std::cout << this->name_ << ": Available Algorithm Count [BWD-data]: " << algo_max_count << std::endl;
    checkCudnnErrors(cudnnFindConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
        filter_desc_, output_desc_, conv_desc_, input_desc_,
        algo_max_count, &returnedAlgoCount, &bwd_data_algo_perf_results[0]));
    // BUG FIX: previously dumped fwd_algo_perf_results here
    for (int i = 0; i < returnedAlgoCount; i++)
        std::cout << "bwd data algo[" << i << "] time: " << bwd_data_algo_perf_results[i].time << ", memory: " << bwd_data_algo_perf_results[i].memory << std::endl;
#else
    checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm_v7(cuda_->cudnn(),
        filter_desc_, output_desc_, conv_desc_, input_desc_,
        algo_max_count, &returnedAlgoCount, &bwd_data_algo_perf_results[0]));
#endif
    conv_bwd_data_algo_ = bwd_data_algo_perf_results[0].algo;
#else
    checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
        filter_desc_, output_desc_, conv_desc_, input_desc_,
        CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &conv_bwd_data_algo_));
#endif
    checkCudnnErrors(cudnnGetConvolutionBackwardDataWorkspaceSize(cuda_->cudnn(),
        filter_desc_, output_desc_, conv_desc_, input_desc_,
        conv_bwd_data_algo_, &temp_size));
    workspace_size_ = std::max(workspace_size_, temp_size);

    // one shared workspace sized for the largest of the three algorithms
    if (workspace_size_ > 0)
    {
        if (d_workspace_ != nullptr)
            checkCudaErrors(cudaFree(d_workspace_));
        checkCudaErrors(cudaMalloc((void**)&d_workspace_, workspace_size_));
    }
}
// Lazy pre-forward setup: creates the filter/bias parameters on first use,
// and rebuilds the output blob, descriptors and cuDNN workspace whenever the
// input batch size changes. Must run before Conv2D::forward().
void Conv2D::fwd_initialize(Blob<float> *input)
{
// initialize weights and bias (first call only)
if (weights_ == nullptr)
{
// initialize container handles: filter layout is NCHW float
checkCudnnErrors(cudnnSetFilter4dDescriptor(filter_desc_,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
out_channels_, input->c(), kernel_size_, kernel_size_));
weights_ = new Blob<float>(out_channels_, input->c(), kernel_size_, kernel_size_);
biases_ = new Blob<float>(1, out_channels_);	// bias size
bias_desc_ = biases_->tensor();
}
// initialize input and output (first call, or batch size changed)
if (input_ == nullptr || batch_size_ != input->n())
{
// initialize input
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
// initialize output: let cuDNN compute the output N/C/H/W
checkCudnnErrors(cudnnGetConvolution2dForwardOutputDim(
conv_desc_, input_desc_, filter_desc_,
&output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));
if (output_ == nullptr)
output_ = new Blob<float>(output_size_);
else
output_->reset(output_size_);
output_desc_ = output_->tensor();
// (re)select conv algorithms and size the shared cudnn workspace
set_workspace();
// initialize weights: load from disk, randomize, or keep frozen values
if (load_pretrain_ && !freeze_)
{
if (load_parameter())
{
std::cout << "error occurred.." << std::endl;
exit(-1);
}
}
else if (!freeze_)
{
init_weight_bias();
}
else
{
/* do nothing */
}
}
}
// Forward convolution: output = conv(input, W) + b, using the algorithm and
// workspace selected in set_workspace().
// NOTE(review): the `input` parameter is ignored here in favor of the cached
// `input_` — assumes fwd_initialize() was called with the same blob; confirm.
Blob<float> *Conv2D::forward(Blob<float> *input)
{
checkCudnnErrors(cudnnConvolutionForward(cuda_->cudnn(),
&cuda_->one,  input_desc_,  input_->cuda(),
filter_desc_, weights_->cuda(), conv_desc_, conv_fwd_algo_, d_workspace_, workspace_size_,
&cuda_->zero, output_desc_, output_->cuda()));
// broadcast-add the per-channel bias onto the convolution result
checkCudnnErrors(cudnnAddTensor(cuda_->cudnn(),
&cuda_->one, bias_desc_, biases_->cuda(),
&cuda_->one, output_desc_, output_->cuda()));
#if (DEBUG_CONV & 0x01)
input_->print( name_ + "::input", true, input_->n(), 28);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif
return output_;
}
// Pre-backward setup: parameter-gradient blobs are created once (matching the
// parameter shapes); the input-gradient blob is (re)allocated whenever the
// incoming batch size changes.
void Conv2D::bwd_initialize(Blob<float> *grad_output)
{
    if (grad_weights_ == nullptr)
    {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_  = new Blob<float>(1, biases_->c());
    }

    // nothing further to do unless this is the first call or the batch changed
    if (grad_input_ != nullptr && batch_size_ == grad_output->n())
        return;

    grad_output_ = grad_output;

    if (grad_input_ == nullptr)
        grad_input_ = new Blob<float>(input_->shape());
    else
        grad_input_->reset(input_->shape());
}
// Back-propagation: computes db, dW and (unless gradient_stop_) dx.
// NOTE(review): the weight gradient reads the cached `grad_output_` member
// while the bias/data gradients use the `grad_output` parameter — assumed to
// be the same blob after bwd_initialize(); confirm at call sites.
Blob<float> *Conv2D::backward(Blob<float> *grad_output)
{
// gradients of biases: db = sum of dy over N,H,W
checkCudnnErrors(
cudnnConvolutionBackwardBias(cuda_->cudnn(),
&cuda_->one,
output_desc_, grad_output->cuda(),
&cuda_->zero,
bias_desc_, grad_biases_->cuda()));
// gradients of weights: dW from cached input and dy
checkCudnnErrors(
cudnnConvolutionBackwardFilter(cuda_->cudnn(),
&cuda_->one,
input_desc_, input_->cuda(),
output_desc_, grad_output_->cuda(),
conv_desc_, conv_bwd_filter_algo_, d_workspace_, workspace_size_,
&cuda_->zero,
filter_desc_, grad_weights_->cuda()));
// gradients of input data (skipped for the first layer via gradient_stop_)
if (!gradient_stop_)
checkCudnnErrors(
cudnnConvolutionBackwardData(cuda_->cudnn(),
&cuda_->one,
filter_desc_, weights_->cuda(),
output_desc_, grad_output->cuda(),
conv_desc_, conv_bwd_data_algo_, d_workspace_, workspace_size_,
&cuda_->zero,
input_desc_, grad_input_->cuda()));
#if (DEBUG_CONV & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
grad_output->print( name_ + "::gradients", true);
grad_biases_->print(name_ + "gbias", true);
grad_weights_->print(name_+ "gfilter", true);
if (!gradient_stop_)
grad_input_->print(name_+"gdata", true);
#endif
#if (DEBUG_CONV & 0x04)
grad_output->print( name_ + "::gradients", true);
grad_biases_->print( name_ + "::gbias", true);
#endif
return grad_input_;
}
/****************************************************************
* Layer definition *
****************************************************************/
// Pooling layer: wraps a cuDNN 2-D pooling descriptor.
// `mode` selects max/average pooling; kernel, padding and stride are square.
// Fix: the descriptor create/set calls previously ignored their cuDNN status;
// they are now routed through checkCudnnErrors like the rest of the file.
Pooling::Pooling(std::string name,
                 int kernel_size,
                 int padding,
                 int stride,
                 cudnnPoolingMode_t mode):
    kernel_size_(kernel_size),
    padding_(padding),
    stride_(stride),
    mode_(mode)
{
    name_ = name;

    checkCudnnErrors(cudnnCreatePoolingDescriptor(&pool_desc_));
    checkCudnnErrors(cudnnSetPooling2dDescriptor(pool_desc_, mode_, CUDNN_PROPAGATE_NAN,
        kernel_size_, kernel_size_, padding_, padding_, stride_, stride_));
}
// Releases the cuDNN pooling descriptor owned by this layer.
Pooling::~Pooling()
{
cudnnDestroyPoolingDescriptor(pool_desc_);
}
// Lazily (re)allocates forward-pass resources.
// Runs only on the first call or when the batch size changes: caches the
// input blob/descriptor, queries cuDNN for the pooled output dimensions and
// sizes output_ accordingly.
void Pooling::fwd_initialize(Blob<float> *input)
{
if (input_ == nullptr || batch_size_ != input->n())
{
input_ = input;
// resource initialize
input_desc_ = input_->tensor();
batch_size_ = input->n();
// setting output
// output_size_ receives {n, c, h, w} computed by cuDNN from the
// pooling descriptor and the input tensor descriptor.
cudnnGetPooling2dForwardOutputDim(pool_desc_, input_desc_,
&output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]);
if (output_ == nullptr)
output_ = new Blob<float>(output_size_);
else
output_->reset(output_size_);
output_desc_ = output_->tensor();
}
// Forward pass: applies the configured pooling (pool_desc_) to the input and
// writes the result into output_. fwd_initialize() must have been called so
// the descriptors/output blob match the current batch.
// The cuDNN status is now checked, consistent with Pooling::backward.
Blob<float> *Pooling::forward(Blob<float> *input)
{
checkCudnnErrors(
cudnnPoolingForward(cuda_->cudnn(), pool_desc_,
&cuda_->one, input_desc_, input_->cuda(),
&cuda_->zero, output_desc_, output_->cuda()));
return output_;
}
// Lazily (re)allocates backward-pass resources: sizes grad_input_ to the
// input shape on first use or when the batch size changes.
void Pooling::bwd_initialize(Blob<float> *grad_output)
{
if (grad_input_ == nullptr || batch_size_ != grad_output->n())
{
grad_output_ = grad_output;
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
}
// Backward pass: computes input gradients for the pooling layer.
// cuDNN requires both the forward input/output and the incoming gradient
// (for max pooling it needs them to locate the argmax positions).
Blob<float> *Pooling::backward(Blob<float> *grad_output)
{
checkCudnnErrors(
cudnnPoolingBackward(cuda_->cudnn(), pool_desc_,
&cuda_->one,
output_desc_, output_->cuda(),
output_desc_, grad_output->cuda(),
input_desc_, input_->cuda(),
&cuda_->zero,
input_desc_, grad_input_->cuda()));
return grad_input_;
}
|
ebdee4e1f35a8d511117e17493f93384179e5b11.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#define point_size 30000
// Device-side buffers shared by the kernels below; allocated with hipMalloc
// in CopyVMapCUDA/CopyRMapCUDA and released in CUDAFree.
// They previously started life as host-side `new` allocations that were
// immediately overwritten by hipMalloc — a pure memory leak — so they are now
// simply initialized to nullptr.
static int *d_VMap = nullptr;
static int *d_RMap = nullptr;
static float *d_point_x = nullptr;
static float *d_point_y = nullptr;
static int *d_point_i = nullptr;
static double *d_w = nullptr;
static float *d_transed_point_x = nullptr;
static float *d_transed_point_y = nullptr;
// Scores each 2-D point against an occupancy ("road") map.
// One point per block (launched <<<N,1>>>, so tid == blockIdx.x). The point's
// world coordinates are converted to grid indices; cells whose (unsigned)
// occupancy value exceeds 100 yield weight 1, otherwise 0.
// NOTE(review): xIndex/yIndex are not checked for being negative — TODO confirm
// points left of/below the map origin cannot occur.
__global__ void MeasInRMap(float *d_point_x, float *d_point_y, int *d_point_i, int *d_Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, double *d_w)
{
int tid = blockIdx.x;
int xIndex, yIndex;
int mapmeas;
double resolutionInverse = 1/Map_resolution;
if(tid < point_size){
// world -> grid cell (truncating toward zero)
xIndex = (int)((d_point_x[tid] - Map_origin_x)*resolutionInverse);
yIndex = (int)((d_point_y[tid] - Map_origin_y)*resolutionInverse);
if(xIndex < Map_width && yIndex < Map_height){
int mapIndex = Map_width*yIndex+xIndex;
mapmeas = d_Map[mapIndex];
// map cells are stored as signed bytes; fold negatives back to 0..255
if(mapmeas <0)
mapmeas +=256;
if(mapmeas > 100)
d_w[tid] = 1;
else
d_w[tid] = 0;
if(d_w[tid]>100)
printf("road weight : %f\n",d_w[tid]);
}
else{
d_w[tid]=0;
printf("Out of RMap size!!!!!!!!!\n");
}
}
else
printf("Out of Road Point size!!!!!!!\n");
}
// Scores each point against the "vertical" map: the weight is the popcount of
// (map cell AND point intensity bits), i.e. how many bit-flags the point
// shares with the map cell. Uses Kernighan's bit-clearing loop.
__global__ void MeasInVMap(float *d_point_x, float *d_point_y, int *d_point_i, int *d_Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, double *d_w)
{
int tid = blockIdx.x;
int xIndex, yIndex;
int mapmeas;
double resolutionInverse = 1/Map_resolution;
if(tid<point_size){
xIndex = (int)((d_point_x[tid] - Map_origin_x)*resolutionInverse);
yIndex = (int)((d_point_y[tid] - Map_origin_y)*resolutionInverse);
if(xIndex < Map_width && yIndex < Map_height){
int mapIndex = Map_width*yIndex+xIndex;
mapmeas = d_Map[mapIndex];
if(mapmeas < 0)
mapmeas +=256;
int b1=0, b2=0;
b1 = mapmeas&d_point_i[tid];
int shBits;
// count set bits of b1 (Kernighan: each iteration clears lowest set bit)
for (shBits=0; b1!=0;shBits++){
b1 = b1 & (b1 -1);
}
// b2 = shBits*shBits;
// if(mapmeas == d_point_i[tid] && b2!=1)
// b2=b2*2;
// d_w[tid] = b2;
d_w[tid] = shBits;
}
else{
printf("VMap%d\t%d\t%d\t%d\n",xIndex,Map_width,yIndex,Map_height);
d_w[tid] = 0;
printf("Out of VMap size!!!!!!!!!\n");
}
}
else
printf("Out of Vertical Point size!!!!!!!!!\n");
}
// Applies a 2-D rigid-body transform (rotation by theta, then translation
// Tx/Ty) to every point; one point per block.
__global__ void Transformcuda(float *d_trans_point_x, float *d_trans_point_y, float *d_transed_point_x, float *d_transed_point_y, float Tx, float Ty, float theta)
{
int tid = blockIdx.x;
d_transed_point_x[tid] = d_trans_point_x[tid]*cos(theta) - d_trans_point_y[tid]*sin(theta) + Tx;
d_transed_point_y[tid] = d_trans_point_x[tid]*sin(theta) + d_trans_point_y[tid]*cos(theta) + Ty;
}
// Host driver: uploads N points, transforms them by (Tx,Ty,theta), then scores
// them against either the vertical map or the road map depending on `type`,
// and copies the per-point weights back into `w` (also returned).
// Requires CopyVMapCUDA/CopyRMapCUDA to have been called first to allocate the
// device buffers. The final blocking hipMemcpy also synchronizes the kernels.
double *MeasInMapCUDA(int N, float *point_x, float *point_y , int *point_i, int *Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, float Tx, float Ty, float theta, double *w, std::string type)
{
// Device copies of three inputs and output, size of allocated memory, num of threads and blocks
hipMemcpy(d_point_x,point_x,N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_point_y,point_y,N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_point_i,point_i,N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Transformcuda), dim3(N),dim3(1), 0, 0, d_point_x, d_point_y, d_transed_point_x, d_transed_point_y, Tx, Ty, theta);
if(type == "vertical")
hipLaunchKernelGGL(( MeasInVMap), dim3(N),dim3(1), 0, 0, d_transed_point_x, d_transed_point_y, d_point_i, d_VMap, Map_resolution, Map_width, Map_height, Map_origin_x, Map_origin_y, d_w);
else if(type == "road")
hipLaunchKernelGGL(( MeasInRMap), dim3(N),dim3(1), 0, 0, d_transed_point_x, d_transed_point_y, d_point_i, d_RMap, Map_resolution, Map_width, Map_height, Map_origin_x, Map_origin_y, d_w);
hipMemcpy(w, d_w, N*sizeof(double), hipMemcpyDeviceToHost);
return w;
}
// Allocates the device copy of the vertical map and uploads it.
// The old host-side `new int[...]` assignment was dead code (the pointer was
// immediately overwritten by hipMalloc) and leaked host memory; it has been
// removed.
void CopyVMapCUDA(int *Map, unsigned int Map_width, unsigned int Map_height){
hipMalloc((void **)&d_VMap, Map_width*Map_height*sizeof(int));
hipMemcpy(d_VMap, Map, Map_width*Map_height*sizeof(int), hipMemcpyHostToDevice);
}
// Allocates the device copy of the road map and uploads it, and also
// allocates the fixed-size per-point work buffers used by MeasInMapCUDA.
// The old host-side `new int[...]` assignment was dead code (immediately
// overwritten by hipMalloc) and leaked host memory; it has been removed.
void CopyRMapCUDA(int *Map, unsigned int Map_width, unsigned int Map_height) {
hipMalloc((void **)&d_RMap, Map_width * Map_height * sizeof(int));
hipMemcpy(d_RMap, Map, Map_width * Map_height * sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **)&d_point_x, point_size*sizeof(float));
hipMalloc((void **)&d_point_y, point_size*sizeof(float));
hipMalloc((void **)&d_point_i, point_size*sizeof(int));
hipMalloc((void **)&d_transed_point_x, point_size*sizeof(float));
hipMalloc((void **)&d_transed_point_y, point_size*sizeof(float));
hipMalloc((void **)&d_w, point_size*sizeof(double));
}
// Releases every device allocation made by CopyVMapCUDA/CopyRMapCUDA.
// Bug fix: hipFree expects the device pointer *value*; the previous code
// passed &d_VMap etc. (the address of the host-side static pointer variable),
// which freed nothing valid and leaked all device memory.
void CUDAFree(){
hipFree(d_VMap); hipFree(d_RMap); hipFree(d_point_x); hipFree(d_point_y); hipFree(d_point_i); hipFree(d_w);
hipFree(d_transed_point_x); hipFree(d_transed_point_y);
} | ebdee4e1f35a8d511117e17493f93384179e5b11.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#define point_size 30000
// Device-side buffers shared by the kernels below; allocated with cudaMalloc
// in CopyVMapCUDA/CopyRMapCUDA and released in CUDAFree.
// They previously started life as host-side `new` allocations that were
// immediately overwritten by cudaMalloc — a pure memory leak — so they are now
// simply initialized to nullptr.
static int *d_VMap = nullptr;
static int *d_RMap = nullptr;
static float *d_point_x = nullptr;
static float *d_point_y = nullptr;
static int *d_point_i = nullptr;
static double *d_w = nullptr;
static float *d_transed_point_x = nullptr;
static float *d_transed_point_y = nullptr;
// Scores each 2-D point against an occupancy ("road") map.
// One point per block (launched <<<N,1>>>, so tid == blockIdx.x). The point's
// world coordinates are converted to grid indices; cells whose (unsigned)
// occupancy value exceeds 100 yield weight 1, otherwise 0.
// NOTE(review): xIndex/yIndex are not checked for being negative — TODO confirm
// points left of/below the map origin cannot occur.
__global__ void MeasInRMap(float *d_point_x, float *d_point_y, int *d_point_i, int *d_Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, double *d_w)
{
int tid = blockIdx.x;
int xIndex, yIndex;
int mapmeas;
double resolutionInverse = 1/Map_resolution;
if(tid < point_size){
// world -> grid cell (truncating toward zero)
xIndex = (int)((d_point_x[tid] - Map_origin_x)*resolutionInverse);
yIndex = (int)((d_point_y[tid] - Map_origin_y)*resolutionInverse);
if(xIndex < Map_width && yIndex < Map_height){
int mapIndex = Map_width*yIndex+xIndex;
mapmeas = d_Map[mapIndex];
// map cells are stored as signed bytes; fold negatives back to 0..255
if(mapmeas <0)
mapmeas +=256;
if(mapmeas > 100)
d_w[tid] = 1;
else
d_w[tid] = 0;
if(d_w[tid]>100)
printf("road weight : %f\n",d_w[tid]);
}
else{
d_w[tid]=0;
printf("Out of RMap size!!!!!!!!!\n");
}
}
else
printf("Out of Road Point size!!!!!!!\n");
}
// Scores each point against the "vertical" map: the weight is the popcount of
// (map cell AND point intensity bits), i.e. how many bit-flags the point
// shares with the map cell. Uses Kernighan's bit-clearing loop.
__global__ void MeasInVMap(float *d_point_x, float *d_point_y, int *d_point_i, int *d_Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, double *d_w)
{
int tid = blockIdx.x;
int xIndex, yIndex;
int mapmeas;
double resolutionInverse = 1/Map_resolution;
if(tid<point_size){
xIndex = (int)((d_point_x[tid] - Map_origin_x)*resolutionInverse);
yIndex = (int)((d_point_y[tid] - Map_origin_y)*resolutionInverse);
if(xIndex < Map_width && yIndex < Map_height){
int mapIndex = Map_width*yIndex+xIndex;
mapmeas = d_Map[mapIndex];
if(mapmeas < 0)
mapmeas +=256;
int b1=0, b2=0;
b1 = mapmeas&d_point_i[tid];
int shBits;
// count set bits of b1 (Kernighan: each iteration clears lowest set bit)
for (shBits=0; b1!=0;shBits++){
b1 = b1 & (b1 -1);
}
// b2 = shBits*shBits;
// if(mapmeas == d_point_i[tid] && b2!=1)
// b2=b2*2;
// d_w[tid] = b2;
d_w[tid] = shBits;
}
else{
printf("VMap%d\t%d\t%d\t%d\n",xIndex,Map_width,yIndex,Map_height);
d_w[tid] = 0;
printf("Out of VMap size!!!!!!!!!\n");
}
}
else
printf("Out of Vertical Point size!!!!!!!!!\n");
}
// Applies a 2-D rigid-body transform (rotation by theta, then translation
// Tx/Ty) to every point; one point per block.
__global__ void Transformcuda(float *d_trans_point_x, float *d_trans_point_y, float *d_transed_point_x, float *d_transed_point_y, float Tx, float Ty, float theta)
{
int tid = blockIdx.x;
d_transed_point_x[tid] = d_trans_point_x[tid]*cos(theta) - d_trans_point_y[tid]*sin(theta) + Tx;
d_transed_point_y[tid] = d_trans_point_x[tid]*sin(theta) + d_trans_point_y[tid]*cos(theta) + Ty;
}
// Host driver: uploads N points, transforms them by (Tx,Ty,theta), then scores
// them against either the vertical map or the road map depending on `type`,
// and copies the per-point weights back into `w` (also returned).
// Requires CopyVMapCUDA/CopyRMapCUDA to have been called first to allocate the
// device buffers. The final blocking cudaMemcpy also synchronizes the kernels.
double *MeasInMapCUDA(int N, float *point_x, float *point_y , int *point_i, int *Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, float Tx, float Ty, float theta, double *w, std::string type)
{
// Device copies of three inputs and output, size of allocated memory, num of threads and blocks
cudaMemcpy(d_point_x,point_x,N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_point_y,point_y,N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_point_i,point_i,N*sizeof(int), cudaMemcpyHostToDevice);
Transformcuda<<<N,1>>>(d_point_x, d_point_y, d_transed_point_x, d_transed_point_y, Tx, Ty, theta);
if(type == "vertical")
MeasInVMap<<<N,1>>>(d_transed_point_x, d_transed_point_y, d_point_i, d_VMap, Map_resolution, Map_width, Map_height, Map_origin_x, Map_origin_y, d_w);
else if(type == "road")
MeasInRMap<<<N,1>>>(d_transed_point_x, d_transed_point_y, d_point_i, d_RMap, Map_resolution, Map_width, Map_height, Map_origin_x, Map_origin_y, d_w);
cudaMemcpy(w, d_w, N*sizeof(double), cudaMemcpyDeviceToHost);
return w;
}
// Allocates the device copy of the vertical map and uploads it.
// The old host-side `new int[...]` assignment was dead code (the pointer was
// immediately overwritten by cudaMalloc) and leaked host memory; it has been
// removed.
void CopyVMapCUDA(int *Map, unsigned int Map_width, unsigned int Map_height){
cudaMalloc((void **)&d_VMap, Map_width*Map_height*sizeof(int));
cudaMemcpy(d_VMap, Map, Map_width*Map_height*sizeof(int), cudaMemcpyHostToDevice);
}
// Allocates the device copy of the road map and uploads it, and also
// allocates the fixed-size per-point work buffers used by MeasInMapCUDA.
// The old host-side `new int[...]` assignment was dead code (immediately
// overwritten by cudaMalloc) and leaked host memory; it has been removed.
void CopyRMapCUDA(int *Map, unsigned int Map_width, unsigned int Map_height) {
cudaMalloc((void **)&d_RMap, Map_width * Map_height * sizeof(int));
cudaMemcpy(d_RMap, Map, Map_width * Map_height * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_point_x, point_size*sizeof(float));
cudaMalloc((void **)&d_point_y, point_size*sizeof(float));
cudaMalloc((void **)&d_point_i, point_size*sizeof(int));
cudaMalloc((void **)&d_transed_point_x, point_size*sizeof(float));
cudaMalloc((void **)&d_transed_point_y, point_size*sizeof(float));
cudaMalloc((void **)&d_w, point_size*sizeof(double));
}
// Releases every device allocation made by CopyVMapCUDA/CopyRMapCUDA.
// Bug fix: cudaFree expects the device pointer *value*; the previous code
// passed &d_VMap etc. (the address of the host-side static pointer variable),
// which freed nothing valid and leaked all device memory.
void CUDAFree(){
cudaFree(d_VMap); cudaFree(d_RMap); cudaFree(d_point_x); cudaFree(d_point_y); cudaFree(d_point_i); cudaFree(d_w);
cudaFree(d_transed_point_x); cudaFree(d_transed_point_y);
} |
d91e3c2a433483cc3f3c46fe50ac0d5bfdd58bd6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <time.h>
#include "cuStopwatch.cu"
#define SHIFT 27
// Variant 1: even global thread ids compute the popcount of arr[tid]; odd ids
// compute the Collatz (3n+1) step count of arr[tid]. Adjacent lanes in a warp
// therefore take different branches, causing heavy intra-warp divergence —
// this is the "slow" variant the benchmark in main() compares against.
// NOTE(review): the Collatz loop never terminates for an input of 0; the
// xorshift generator in randgen() should never produce 0 — confirm.
__global__ void compute_parity_1(const uint32_t* arr, uint32_t* arr_res, uint32_t size) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < size){
if((tid % 2) == 0){
uint32_t popcnt = 0;
uint32_t n = arr[tid];
while(n != 0){
popcnt += n%2;
n /= 2;
}
arr_res[tid] = popcnt;
}else{
uint32_t step = 0;
uint64_t n = arr[tid];
while(n != 1){
step++;
if(n%2) n = 3*n + 1; else n /= 2;
}
arr_res[tid] = step;
}
}
return;
}
// Variant 2: same work as variant 1, but remapped so the first half of the
// threads handle all even indices (popcount) and the second half handle all
// odd indices (Collatz). Each warp then executes a single branch, removing
// the divergence of variant 1 at the cost of scattered result writes.
__global__ void compute_parity_2(const uint32_t* arr, uint32_t* arr_res, uint32_t size) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < size){
if(tid * 2 < size){
// threads 0..size/2-1 -> even indices 0,2,4,...
tid *= 2;
uint32_t popcnt = 0;
uint32_t n = arr[tid];
while(n != 0){
popcnt += n%2;
n /= 2;
}
arr_res[tid] = popcnt;
}else{
// threads size/2..size-1 -> odd indices 1,3,5,...
tid = tid * 2 - size + 1;
uint32_t step = 0;
uint64_t n = arr[tid];
while(n != 1){
step++;
if(n%2) n = 3*n + 1; else n /= 2;
}
arr_res[tid] = step;
}
}
return;
}
// Fills `arr` with `count` pseudo-random values using a xorshift32 generator
// seeded from the current time (non-zero seed keeps the state non-zero).
void randgen(uint32_t* arr, size_t count){
uint32_t state = time(NULL);
for(uint32_t i = 0; i < count; i++){
state ^= state << 13;
state ^= state >> 17;
state ^= state << 5;
arr[i] = state;
}
return;
}
// Benchmarks the divergent (compute_parity_1) vs. divergence-free
// (compute_parity_2) kernels on 2^SHIFT random integers.
// Bug fix: the hipify translation left two stray `0, 0` arguments after the
// shared-mem/stream parameters of hipLaunchKernelGGL, so each 3-parameter
// kernel was being passed five arguments; the extra zeros are removed
// (compare the correct launch form used elsewhere in this file).
int main() {
// Allocate memory, filling in random data and transfer to device
uint32_t *arr_host, *arr_dev, *arr_res_dev;
const uint32_t arr_size = 1 << SHIFT;
hipHostMalloc((void**)&arr_host, arr_size*sizeof(uint32_t), hipHostMallocDefault);
hipMalloc((void**)&arr_dev, arr_size*sizeof(uint32_t));
hipMalloc((void**)&arr_res_dev, arr_size*sizeof(uint32_t));
printf("Copying data to device\n");
randgen(arr_host, arr_size);
hipMemcpy(arr_dev, arr_host, arr_size*sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostFree(arr_host);
// Performing odd-even computing on 2^SHIFT integers:
// (1 << (SHIFT-10)) blocks of 1024 threads covers arr_size exactly.
printf("First method\n");
cuStopwatch sw1;
sw1.start();
hipLaunchKernelGGL(compute_parity_1, dim3(1<<(SHIFT-10)), dim3(1024), 0, 0, arr_dev, arr_res_dev, arr_size);
printf("%.4fms\n", sw1.stop());
printf("\nSecond method\n");
cuStopwatch sw2;
sw2.start();
hipLaunchKernelGGL(compute_parity_2, dim3(1<<(SHIFT-10)), dim3(1024), 0, 0, arr_dev, arr_res_dev, arr_size);
printf("%.4fms\n", sw2.stop());
// Free memory
hipFree(arr_dev);
hipFree(arr_res_dev);
return 0;
} | d91e3c2a433483cc3f3c46fe50ac0d5bfdd58bd6.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <time.h>
#include "cuStopwatch.cu"
#define SHIFT 27
// Variant 1: even global thread ids compute the popcount of arr[tid]; odd ids
// compute the Collatz (3n+1) step count of arr[tid]. Adjacent lanes in a warp
// therefore take different branches, causing heavy intra-warp divergence —
// this is the "slow" variant the benchmark in main() compares against.
// NOTE(review): the Collatz loop never terminates for an input of 0; the
// xorshift generator in randgen() should never produce 0 — confirm.
__global__ void compute_parity_1(const uint32_t* arr, uint32_t* arr_res, uint32_t size) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < size){
if((tid % 2) == 0){
uint32_t popcnt = 0;
uint32_t n = arr[tid];
while(n != 0){
popcnt += n%2;
n /= 2;
}
arr_res[tid] = popcnt;
}else{
uint32_t step = 0;
uint64_t n = arr[tid];
while(n != 1){
step++;
if(n%2) n = 3*n + 1; else n /= 2;
}
arr_res[tid] = step;
}
}
return;
}
// Variant 2: same work as variant 1, but remapped so the first half of the
// threads handle all even indices (popcount) and the second half handle all
// odd indices (Collatz). Each warp then executes a single branch, removing
// the divergence of variant 1 at the cost of scattered result writes.
__global__ void compute_parity_2(const uint32_t* arr, uint32_t* arr_res, uint32_t size) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < size){
if(tid * 2 < size){
// threads 0..size/2-1 -> even indices 0,2,4,...
tid *= 2;
uint32_t popcnt = 0;
uint32_t n = arr[tid];
while(n != 0){
popcnt += n%2;
n /= 2;
}
arr_res[tid] = popcnt;
}else{
// threads size/2..size-1 -> odd indices 1,3,5,...
tid = tid * 2 - size + 1;
uint32_t step = 0;
uint64_t n = arr[tid];
while(n != 1){
step++;
if(n%2) n = 3*n + 1; else n /= 2;
}
arr_res[tid] = step;
}
}
return;
}
// Fills `arr` with `count` pseudo-random values using a xorshift32 generator
// seeded from the current time (non-zero seed keeps the state non-zero).
void randgen(uint32_t* arr, size_t count){
uint32_t state = time(NULL);
for(uint32_t i = 0; i < count; i++){
state ^= state << 13;
state ^= state >> 17;
state ^= state << 5;
arr[i] = state;
}
return;
}
// Benchmarks the divergent (compute_parity_1) vs. divergence-free
// (compute_parity_2) kernels on 2^SHIFT random integers.
// Launch config: (1 << (SHIFT-10)) blocks of 1024 threads covers arr_size
// exactly (note `1<<SHIFT-10` parses as 1 << (SHIFT-10)).
int main() {
// Allocate memory, filling in random data and transfer to device
uint32_t *arr_host, *arr_dev, *arr_res_dev;
const uint32_t arr_size = 1 << SHIFT;
// pinned host memory for the faster async-capable transfer path
cudaHostAlloc((void**)&arr_host, arr_size*sizeof(uint32_t), cudaHostAllocDefault);
cudaMalloc((void**)&arr_dev, arr_size*sizeof(uint32_t));
cudaMalloc((void**)&arr_res_dev, arr_size*sizeof(uint32_t));
printf("Copying data to device\n");
randgen(arr_host, arr_size);
cudaMemcpy(arr_dev, arr_host, arr_size*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaFreeHost(arr_host);
// Performing odd-even computing on 2^SHIFT (= 2^27 here) integers
printf("First method\n");
cuStopwatch sw1;
sw1.start();
compute_parity_1<<<(1<<SHIFT-10), 1024>>>(arr_dev, arr_res_dev, arr_size);
printf("%.4fms\n", sw1.stop());
printf("\nSecond method\n");
cuStopwatch sw2;
sw2.start();
compute_parity_2<<<(1<<SHIFT-10), 1024>>>(arr_dev, arr_res_dev, arr_size);
printf("%.4fms\n", sw2.stop());
// Free memory
cudaFree(arr_dev);
cudaFree(arr_res_dev);
return 0;
} |
de861bf89e8897966253597927e5d77b1685e610.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2011 Russel Steinbach, Jeffrey Blanchard, Bradley Gordon,
* and Toluwaloju Alabi
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/transform_reduce.h>
#include <thrust/random.h>
#include <thrust/functional.h>
namespace RandomizedBucketSelect{
using namespace std;
#define MAX_THREADS_PER_BLOCK 1024
#define CUTOFF_POINT 200000
#define NUM_PIVOTS 17
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
/// ***********************************************************
/// ***********************************************************
/// **** HELPER CPU FUNCTIONS
/// ***********************************************************
/// ***********************************************************
// Globals used by timing(): a start/stop event pair and the elapsed time.
// NOTE(review): the global `float time;` shadows ::time inside this namespace
// (which also has `using namespace std`) — confirm no caller needs time(2) here.
hipEvent_t start, stop;
float time;
// Simple event-based stopwatch: timing(0, _) records the start event;
// timing(1, ind) synchronizes, records the stop event and prints the elapsed
// milliseconds tagged with `ind`.
void timing(int selection, int ind){
if(selection==0) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
}
else {
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Time %d: %lf \n", ind, time);
}
}
// Frees the scratch buffers used by phaseOne/phaseTwo: one host (malloc'd)
// counter array and three device allocations.
template<typename T>
void cleanup(uint *h_c, T* d_k, int *etb, uint *bc){
free(h_c);
hipFree(d_k);
hipFree(etb);
hipFree(bc);
}
// Zeroes a device-resident uint vector of `length` entries via hipMemset
// (this operates on GPU memory, despite the function's earlier comment).
void setToAllZero(uint* deviceVector, int length){
hipMemset(deviceVector, 0, length * sizeof(uint));
}
/// ***********************************************************
/// ***********************************************************
/// **** HELPER GPU FUNCTIONS-KERNELS
/// ***********************************************************
/// ***********************************************************
// Assigns every element to one of `numBuckets` buckets using piecewise-linear
// interpolation between `numPivots` sampled pivot values, and accumulates
// per-block bucket counts.
// Dynamic shared memory layout (size must be passed at launch):
//   [numBuckets x uint counts][numPivots-1 x double slopes][numPivots x T pivots]
// Per-block counts are written to bucketCount[blockIdx.x * numBuckets + b];
// elementToBucket[i] records each element's bucket. Threads stride by `offset`
// (grid-stride style with a caller-supplied stride).
template <typename T>
__global__ void assignSmartBucket(T * d_vector, int length, int numBuckets, double * slopes, T * pivots, int numPivots, uint* elementToBucket, uint* bucketCount, int offset){
int index = blockDim.x * blockIdx.x + threadIdx.x;
uint bucketIndex;
int threadIndex = threadIdx.x;
//variables in shared memory for fast access
__shared__ int sharedNumSmallBuckets;
// thread 0 computes the number of small buckets per pivot interval
if (threadIndex < 1)
sharedNumSmallBuckets = numBuckets / (numPivots-1);
extern __shared__ uint array[];
uint * sharedBuckets = (uint *)array;
double * sharedSlopes = (double *)&sharedBuckets[numBuckets];
T * sharedPivots = (T *)&sharedSlopes[numPivots-1];
/*
uint * sharedBuckets = (uint *)array;
double * sharedSlopes = (double *)&sharedBuckets[numBuckets];
T * sharedPivots = (T *)&sharedSlopes[numPivots-1];
// statically allocating the array gives faster results
__shared__ double sharedSlopes[NUM_PIVOTS-1];
__shared__ T sharedPivots[NUM_PIVOTS];
*/
//reading bucket counts into shared memory where increments will be performed
for (int i = 0; i < (numBuckets / MAX_THREADS_PER_BLOCK); i++)
if (threadIndex < numBuckets)
sharedBuckets[i * MAX_THREADS_PER_BLOCK + threadIndex] = 0;
// first numPivots threads also stage the pivots/slopes into shared memory
if(threadIndex < numPivots) {
*(sharedPivots + threadIndex) = *(pivots + threadIndex);
if(threadIndex < numPivots-1)
sharedSlopes[threadIndex] = slopes[threadIndex];
}
syncthreads();
//assigning elements to buckets and incrementing the bucket counts
if(index < length) {
int i;
for(i = index; i < length; i += offset) {
T num = d_vector[i];
int minPivotIndex = 0;
int maxPivotIndex = numPivots-1;
int midPivotIndex;
// find the index of the pivot that is the greatest s.t. lower than or equal to num using binary search
//while (maxPivotIndex > minPivotIndex+1) {
// fixed-trip binary search: log2(numPivots-1) halving steps
for(int j = 1; j < numPivots - 1; j*=2) {
midPivotIndex = (maxPivotIndex + minPivotIndex) / 2;
if (num >= sharedPivots[midPivotIndex])
minPivotIndex = midPivotIndex;
else
maxPivotIndex = midPivotIndex;
}
// bucket = base bucket of the pivot interval + linear offset within it
bucketIndex = (minPivotIndex * sharedNumSmallBuckets) + (int) ((num - sharedPivots[minPivotIndex]) * sharedSlopes[minPivotIndex]);
elementToBucket[i] = bucketIndex;
// hashmap implementation set[bucketindex]=add.i;
//bucketCount[blockIdx.x * numBuckets + bucketIndex]++;
atomicInc (sharedBuckets + bucketIndex, length);
}
}
syncthreads();
//reading bucket counts from shared memory back to global memory
for (int i = 0; i < (numBuckets / MAX_THREADS_PER_BLOCK); i++)
if (threadIndex < numBuckets)
//atomicAdd(bucketCount + blockIdx.x * numBuckets + i * MAX_THREADS_PER_BLOCK + threadIndex, sharedBuckets[i * MAX_THREADS_PER_BLOCK + threadIndex]);
*(bucketCount + blockIdx.x * numBuckets + i * MAX_THREADS_PER_BLOCK + threadIndex) = *(sharedBuckets + i * MAX_THREADS_PER_BLOCK + threadIndex);
}
// Assigns every element to one of `bucketNumbers` uniformly-spaced buckets
// over [minimum, maximum] (bucket = (x - min) * slope, clamped to the last
// bucket) and accumulates a single global histogram in bucketCount.
// Shared memory: bucketNumbers uints (passed at launch). Threads stride by
// `offset`.
template <typename T>
__global__ void assignBucket(T* d_vector, int length, int bucketNumbers, double slope, double minimum, int* bucket, uint* bucketCount, int offset){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int bucketIndex;
extern __shared__ uint sharedBuckets[];
int index = threadIdx.x;
//variables in shared memory for fast access
__shared__ int sbucketNums;
__shared__ double sMin;
sbucketNums = bucketNumbers;
sMin = minimum;
//reading bucket counts into shared memory where increments will be performed
for(int i=0; i < (bucketNumbers/1024); i++)
if(index < bucketNumbers)
sharedBuckets[i*1024+index] = 0;
syncthreads();
//assigning elements to buckets and incrementing the bucket counts
if(idx < length) {
int i;
for(i=idx; i< length; i+=offset){
//calculate the bucketIndex for each element
bucketIndex = (d_vector[i] - sMin) * slope;
//if it goes beyond the number of buckets, put it in the last bucket
if(bucketIndex >= sbucketNums)
bucketIndex = sbucketNums - 1;
bucket[i] = bucketIndex;
atomicInc(&sharedBuckets[bucketIndex], length);
}
}
syncthreads();
//reading bucket counts from shared memory back to global memory
for(int i=0; i < (bucketNumbers/1024); i++)
if(index < bucketNumbers)
atomicAdd(&bucketCount[i*1024+index], sharedBuckets[i*1024+index]);
}
// Refinement step: only elements currently in the candidate bucket `Kbucket`
// are re-bucketed over the narrowed [minimum, maximum] range; every other
// element is marked with the sentinel bucketNumbers+1 so later passes skip it.
template <typename T>
__global__ void reassignBucket(T* d_vector, int *bucket, uint *bucketCount, const int bucketNumbers, const int length, const double slope, const double maximum, const double minimum, int offset, int Kbucket){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ uint sharedBuckets[];
int index = threadIdx.x;
int bucketIndex;
//reading bucket counts to shared memory where increments will be performed
if(index < bucketNumbers){
sharedBuckets[index] =0;
}
syncthreads();
//assigning elements to buckets and incrementing the bucket counts
if (idx < length){
int i;
for(i=idx; i<length; i+=offset){
if(bucket[i] != Kbucket){
// element already eliminated: park it in an out-of-range bucket
bucket[i] = bucketNumbers+1;
}
else{
//calculate the bucketIndex for each element
bucketIndex = (d_vector[i] - minimum) * slope;
//if it goes beyond the number of buckets, put it in the last bucket
if(bucketIndex >= bucketNumbers){
bucketIndex = bucketNumbers - 1;
}
bucket[i] = bucketIndex;
atomicInc(&sharedBuckets[bucketIndex], length);
}
}
}
syncthreads();
//reading bucket counts from shared memory back to global memory
if(index < bucketNumbers){
atomicAdd(&bucketCount[index], sharedBuckets[index]);
}
}
// Host-side scan over a single global histogram: copies counts from the device
// and walks the prefix sum until it reaches k. Returns the bucket index;
// *sum holds the running count INCLUDING the returned bucket.
inline int FindKBucket(uint *d_counter, uint *h_counter, const int numBuckets, const int k, uint * sum){
hipMemcpy(sum, d_counter, sizeof(uint), hipMemcpyDeviceToHost);
int Kbucket = 0;
if (*sum<k){
hipMemcpy(h_counter, d_counter, numBuckets * sizeof(uint), hipMemcpyDeviceToHost);
while ( (*sum<k) & (Kbucket<numBuckets-1)){
Kbucket++;
*sum += h_counter[Kbucket];
}
}
else{
hipMemcpy(h_counter, d_counter, sizeof(uint), hipMemcpyDeviceToHost);
}
return Kbucket;
}
// Per-block-histogram variant: reads the LAST block's row of d_bucketCount
// (which, after sumCounts(), holds the column-wise totals) and scans it for
// the bucket containing the kth element. Unlike FindKBucket above, *sum is
// set to the count EXCLUDING the returned bucket.
inline int findKBucket(uint * d_bucketCount, uint * h_bucketCount, int numBuckets, int k, uint * sum, int numBlocks){
int sumsRowIndex= numBuckets * (numBlocks-1);
/*
for(int j=0; j<numBuckets; j++)
CUDA_CALL(hipMemcpy(h_bucketCount + j, d_bucketCount + sumsRowIndex + j, sizeof(uint), hipMemcpyDeviceToHost));
*/
CUDA_CALL(hipMemcpy(h_bucketCount, d_bucketCount + sumsRowIndex, sizeof(uint) * numBuckets, hipMemcpyDeviceToHost));
int kBucket = 0;
uint scanner = h_bucketCount[0];
while ((scanner < k) & (kBucket < numBuckets - 1)) {
kBucket++;
scanner += h_bucketCount[kBucket];
}
*(sum) = scanner - h_bucketCount[kBucket];
return kBucket;
}
// Column-wise inclusive prefix sum over the per-block histogram matrix
// (numBlocks rows x numBuckets columns): one thread per bucket column.
// After this, row j holds the cumulative counts of blocks 0..j.
__global__ void sumCounts(uint * d_bucketCount, const int numBuckets, const int numBlocks) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int j=1; j<numBlocks; j++)
d_bucketCount[index + numBuckets*j] += d_bucketCount[index + numBuckets*(j-1)];
}
// Compacts all elements belonging to `bucket` into newArray, using the
// per-block cumulative counts in d_bucketCount (prepared by sumCounts) as
// per-block write cursors: each block atomically decrements its own cached
// cursor to claim output slots. Output order within the bucket is arbitrary.
template <typename T>
__global__ void copyElements (T* d_vector, int length, uint* elementToBucket, const int bucket, T* newArray, uint offset, uint * d_bucketCount, int numTotalBuckets){
idx = blockIdx.x * blockDim.x + threadIdx.x;
int threadIndex = threadIdx.x;
__shared__ uint sharedBucket;
__shared__ uint sharedBucketCount;
if(threadIndex < 1) {
sharedBucket = bucket;
sharedBucketCount = d_bucketCount[blockIdx.x * numTotalBuckets + bucket];
}
syncthreads();
if(idx < length) {
for(int i=idx; i<length; i+=offset) {
if (elementToBucket[i] == sharedBucket)
//newArray[atomicDec(d_bucketCount + blockIdx.x * numTotalBuckets + temp, length)-1] = d_vector[i];
newArray[atomicDec(&sharedBucketCount, length) - 1] = d_vector[i];
}
}
}
// Simpler compaction variant: a single global atomic counter `count` hands out
// output slots for every element whose bucket matches.
template <typename T>
__global__ void copyElement(T* d_vector, int length, int* elementToBucket, int bucket, T* newArray, uint* count, int offset){
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < length){
for(int i=idx; i<length; i+=offset)
//copy elements in the kth bucket to the new array
if(elementToBucket[i] == bucket)
newArray[atomicInc(count, length)] = d_vector[i];
}
}
// Writes into Kvalue[0] the value of (some) element assigned to Kbucket.
// Intended for the case where Kbucket holds exactly one element; with several
// matches the surviving write is whichever thread stores last.
template <typename T>
__global__ void GetKvalue(T* d_vector, int * d_bucket, const int Kbucket, const int n, T* Kvalue, int offset )
{
uint xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) {
int i;
for(i=xIndex; i<n; i+=offset){
if ( d_bucket[i] == Kbucket )
Kvalue[0] = d_vector[i];
}
}
}
/// ***********************************************************
/// ***********************************************************
/// **** GENERATE PIVOTS
/// ***********************************************************
/// ***********************************************************
// Bob Jenkins-style 6-round integer mixing function, callable from both host
// and device. Used to turn a thread index into a well-scrambled RNG seed
// (see RandomNumberFunctor). Deterministic: same input -> same output.
__host__ __device__
unsigned int hash(unsigned int key) {
    key = (key + 0x7ed55d16) + (key << 12);
    key = (key ^ 0xc761c23c) ^ (key >> 19);
    key = (key + 0x165667b1) + (key << 5);
    key = (key + 0xd3a2646c) ^ (key << 9);
    key = (key + 0xfd7046c5) + (key << 3);
    key = (key ^ 0xb55a4f09) ^ (key >> 16);
    return key;
}
// Thrust functor mapping a thread index to a uniform float in [0, 1):
// each index gets its own engine seeded by hash(threadIdx) * mainSeed, so the
// draws are independent and reproducible for a fixed mainSeed.
struct RandomNumberFunctor :
public thrust::unary_function<unsigned int, float> {
unsigned int mainSeed;
RandomNumberFunctor(unsigned int _mainSeed) :
mainSeed(_mainSeed) {}
__host__ __device__
float operator()(unsigned int threadIdx)
{
unsigned int seed = hash(threadIdx) * mainSeed;
thrust::default_random_engine rng(seed);
rng.discard(threadIdx);
thrust::uniform_real_distribution<float> u(0, 1);
return u(rng);
}
};
// Fills the device array d_vec with `size` uniform [0,1) values, seeding from
// the wall clock (usec * sec) so successive calls differ.
// NOTE(review): relies on gettimeofday/timeval — presumably <sys/time.h> is
// pulled in elsewhere; confirm.
template <typename T>
void createRandomVector(T * d_vec, int size) {
timeval t1;
uint seed;
gettimeofday(&t1, NULL);
seed = t1.tv_usec * t1.tv_sec;
thrust::device_ptr<T> d_ptr(d_vec);
thrust::transform (thrust::counting_iterator<uint>(0),thrust::counting_iterator<uint>(size), d_ptr, RandomNumberFunctor(seed));
}
// Replaces each random fraction in `in` (values in [0,1)) with the element of
// `list` at index floor(fraction * size) — i.e. draws a random sample from
// `list` in place.
template <typename T>
__global__ void enlargeIndexAndGetElements (T * in, T * list, int size) {
*(in + blockIdx.x*blockDim.x + threadIdx.x) = *(list + ((int) (*(in + blockIdx.x * blockDim.x + threadIdx.x) * size)));
}
// Overload writing the sampled uint elements to a separate output array.
__global__ void enlargeIndexAndGetElements (float * in, uint * out, uint * list, int size) {
*(out + blockIdx.x * blockDim.x + threadIdx.x) = (uint) *(list + ((int) (*(in + blockIdx.x * blockDim.x + threadIdx.x) * size)));
}
// Builds the pivot list and per-interval slopes used by assignSmartBucket.
// Draws sizeOfSample random elements from d_list, sorts them, and picks
// pivots[0]=min, pivots[numPivots-1]=max, the next-to-extreme pivots at a
// fixed endOffset into the sorted sample, and the interior pivots at evenly
// spaced sample positions. slope[i] converts a value offset within interval i
// into a small-bucket index (numSmallBuckets buckets per interval).
template <typename T>
void generatePivots (T * pivots, double * slopes, T * d_list, int sizeOfVector, int numPivots, int sizeOfSample, int totalSmallBuckets, T min, T max) {
T * d_randoms;
int endOffset = 22;
int pivotOffset = (sizeOfSample - endOffset * 2) / (numPivots - 3);
int numSmallBuckets = totalSmallBuckets / (numPivots - 1);
hipMalloc (&d_randoms, sizeof (T) * sizeOfSample);
createRandomVector (d_randoms, sizeOfSample);
// converts randoms floats into elements from necessary indices
hipLaunchKernelGGL(( enlargeIndexAndGetElements), dim3((sizeOfSample/MAX_THREADS_PER_BLOCK)), dim3(MAX_THREADS_PER_BLOCK), 0, 0, d_randoms, d_list, sizeOfVector);
pivots[0] = min;
pivots[numPivots - 1] = max;
thrust::device_ptr<T>randoms_ptr(d_randoms);
thrust::sort(randoms_ptr, randoms_ptr + sizeOfSample);
hipDeviceSynchronize();
// set the pivots which are endOffset away from the min and max pivots
hipMemcpy (pivots + 1, d_randoms + endOffset - 1, sizeof (T), hipMemcpyDeviceToHost);
hipMemcpy (pivots + numPivots - 2, d_randoms + sizeOfSample - endOffset - 1, sizeof (T), hipMemcpyDeviceToHost);
slopes[0] = numSmallBuckets / (double) (pivots[1] - pivots[0]);
// interior pivots: one every pivotOffset sorted-sample positions
for (register int i = 2; i < numPivots - 2; i++) {
hipMemcpy (pivots + i, d_randoms + pivotOffset * (i - 1) + endOffset - 1, sizeof (T), hipMemcpyDeviceToHost);
slopes[i - 1] = numSmallBuckets / (double) (pivots[i] - pivots[i - 1]);
}
slopes[numPivots - 3] = numSmallBuckets / (double) (pivots[numPivots - 2] - pivots[numPivots - 3]);
slopes[numPivots - 2] = numSmallBuckets / (double) (pivots[numPivots - 1] - pivots[numPivots - 2]);
hipFree(d_randoms);
}
/************************************************************************/
/************************************************************************/
//THIS IS THE PHASE TWO FUNCTION WHICH WILL BE CALLED IF THE INPUT
//LENGTH IS LESS THAN THE CUTOFF OF 2MILLION 200 THOUSAND
/************************************************************************/
/************************************************************************/
// Phase two of the bucket k-selection (used for inputs below the cutoff):
// repeatedly histograms the elements into 1024 uniform buckets over the
// current [minValue, maxValue] range, locates the bucket containing the kth
// element, and narrows the range to that bucket until it holds a single
// element (or 1000 refinement iterations elapse). Returns the kth value.
// maxValue/minValue default to 0; if they are equal the extrema are computed
// here with thrust::minmax_element.
template <typename T>
T phaseTwo(T* d_vector, int length, int K, int blocks, int threads, double maxValue = 0, double minValue = 0){
//declaring and initializing variables for kernel launches
int threadsPerBlock = threads;
int numBlocks = blocks;
int numBuckets = 1024;
int offset = blocks * threads;
uint sum=0, Kbucket=0, iter=0;
int Kbucket_count = 0;
//initializing variables for kernel launches
if(length < 1024){
numBlocks = 1;
}
//variable to store the end result
T kthValue =0;
//declaring and initializing other variables
size_t size = length * sizeof(int);
size_t totalBucketSize = numBuckets * sizeof(uint);
//allocate memory to store bucket assignments and to count elements in buckets
int* elementToBucket;
uint* d_bucketCount;
hipMalloc(&elementToBucket, size);
hipMalloc(&d_bucketCount, totalBucketSize);
uint * h_bucketCount = (uint*)malloc(totalBucketSize);
T* d_Kth_val;
hipMalloc(&d_Kth_val, sizeof(T));
thrust::device_ptr<T>dev_ptr(d_vector);
//if max == min, then we know that it must not have had the values passed in.
if(maxValue == minValue){
thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T> > result = thrust::minmax_element(dev_ptr, dev_ptr + length);
minValue = *result.first;
maxValue = *result.second;
}
double slope = (numBuckets - 1)/(maxValue - minValue);
//first check is max is equal to min
if(maxValue == minValue){
// all elements identical: any of them is the kth value
cleanup(h_bucketCount, d_Kth_val, elementToBucket,d_bucketCount);
return maxValue;
}
//make all entries of this vector equal to zero
setToAllZero(d_bucketCount, numBuckets);
//distribute elements to bucket
hipLaunchKernelGGL(( assignBucket), dim3(numBlocks), dim3(threadsPerBlock), numBuckets*sizeof(uint), 0, d_vector, length, numBuckets, slope, minValue, elementToBucket, d_bucketCount, offset);
//find the bucket containing the kth element we want
Kbucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &sum);
Kbucket_count = h_bucketCount[Kbucket];
// refine until the candidate bucket holds exactly one element
while ( (Kbucket_count > 1) && (iter < 1000)){
// shrink the search range to the candidate bucket's value span
minValue = max(minValue, minValue + Kbucket/slope);
maxValue = min(maxValue, minValue + 1/slope);
// rebase K relative to elements surviving in the candidate bucket
K = K - sum + Kbucket_count;
if ( maxValue - minValue > 0.0f ){
slope = (numBuckets - 1)/(maxValue-minValue);
setToAllZero(d_bucketCount, numBuckets);
hipLaunchKernelGGL(( reassignBucket), dim3(numBlocks), dim3(threadsPerBlock), numBuckets * sizeof(uint) , 0, d_vector, elementToBucket, d_bucketCount, numBuckets,length, slope, maxValue, minValue, offset, Kbucket);
sum = 0;
Kbucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &sum);
Kbucket_count = h_bucketCount[Kbucket];
iter++;
}
else{
//if the max and min are the same, then we are done
cleanup(h_bucketCount, d_Kth_val, elementToBucket, d_bucketCount);
return maxValue;
}
}
// candidate bucket has one element: read it back and return it
hipLaunchKernelGGL(( GetKvalue), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, d_vector, elementToBucket, Kbucket, length, d_Kth_val, offset);
hipMemcpy(&kthValue, d_Kth_val, sizeof(T), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
cleanup(h_bucketCount, d_Kth_val, elementToBucket, d_bucketCount);
return kthValue;
}
/* this function finds the kth-largest element from the input array */
/* Finds the Kth-smallest element of the device array d_vector (HIP build) by
 * hashing values into 1024 uniform-width buckets, locating the bucket that
 * contains the Kth order statistic, and recursing on that bucket's contents.
 *
 * d_vector       - device input; freed here only when pass > 0 and we recurse
 * length         - number of elements in d_vector
 * K              - 1-based rank of the element wanted (Kth smallest)
 * blocks/threads - launch configuration for the helper kernels
 * pass           - recursion depth, 0 on the initial call
 */
template <typename T>
T phaseOne(T* d_vector, int length, int K, int blocks, int threads, int pass = 0){
//declaring variables for kernel launches
int threadsPerBlock = threads;
int numBlocks = blocks;
int numBuckets = 1024;
int offset = blocks * threads; // total grid stride used by the kernels
int kthBucket, kthBucketCount;
int newInputLength;
int* elementToBucket; //array showing what bucket every element is in
uint *d_bucketCount, *count; //bucket counts / copy cursor
uint kthBucketScanner = 0;
size_t size = length * sizeof(int);
//variable to store the end result
T kthValue = 0;
T* newInput;
//find max and min with thrust
double maximum, minimum;
thrust::device_ptr<T>dev_ptr(d_vector);
thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T> > result = thrust::minmax_element(dev_ptr, dev_ptr + length);
minimum = *result.first;
maximum = *result.second;
//if the max and the min are the same, then we are done
if(maximum == minimum){
return maximum;
}
//if we want the max or min just return it
if(K == 1){
return minimum;
}
if(K == length){
return maximum;
}
//Allocate memory to store bucket assignments
CUDA_CALL(hipMalloc(&elementToBucket, size));
//Allocate memory to store bucket counts
size_t totalBucketSize = numBuckets * sizeof(uint);
CUDA_CALL(hipMalloc(&d_bucketCount, totalBucketSize));
uint* h_bucketCount = (uint*)malloc(totalBucketSize);
//Calculate the slope, i.e numBuckets/range
double range = maximum - minimum;
double slope = (numBuckets - 1)/range;
hipMalloc(&count, sizeof(uint));
//Set the bucket count vector to all zeros
setToAllZero(d_bucketCount, numBuckets);
//Distribute elements into their respective buckets
hipLaunchKernelGGL(( assignBucket), dim3(numBlocks), dim3(threadsPerBlock), numBuckets*sizeof(uint), 0, d_vector, length, numBuckets, slope, minimum, elementToBucket, d_bucketCount, offset);
kthBucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &kthBucketScanner);
kthBucketCount = h_bucketCount[kthBucket];
printf("original kthBucketCount = %d\n", kthBucketCount);
//update K: it becomes the rank of the target inside the kth bucket
if(kthBucket != 0){
K = kthBucketCount - (kthBucketScanner - K);
}
//copy elements in the kth bucket to a new array
hipMalloc(&newInput, kthBucketCount * sizeof(T));
setToAllZero(count, 1);
hipLaunchKernelGGL(( copyElement), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_vector, length, elementToBucket, kthBucket, newInput, count, offset);
//store the length of the newly copied elements
newInputLength = kthBucketCount;
//if we only copied one element, then we are done
if(newInputLength == 1){
thrust::device_ptr<T>new_ptr(newInput);
kthValue = new_ptr[0];
//free all used memory (fix: also release h_bucketCount, which the original
//leaked on every exit path)
free(h_bucketCount);
hipFree(elementToBucket); hipFree(d_bucketCount); hipFree(count); hipFree(newInput);
return kthValue;
}
//END OF FIRST PASS, NOW WE PROCEED TO SUBSEQUENT PASSES
//if the new length is greater than the CUTOFF, run the regular phaseOne again
if(newInputLength > CUTOFF_POINT && pass < 1){
if(pass > 0){
hipFree(d_vector);
}
//release intermediates before recursing to cap peak device memory, then
//return directly: the original fell through to the common cleanup below and
//freed elementToBucket/d_bucketCount/count a SECOND time (double free)
free(h_bucketCount);
hipFree(elementToBucket); hipFree(d_bucketCount); hipFree(count);
kthValue = phaseOne(newInput, newInputLength, K, blocks, threads,pass + 1);
//the recursive call (pass >= 1) never frees its input, so release it here
hipFree(newInput);
return kthValue;
}
else{
//narrow [minimum, maximum] to the kth bucket's span and finish in phaseTwo
minimum = max(minimum, minimum + kthBucket/slope);
maximum = min(maximum, minimum + 1/slope);
kthValue = phaseTwo(newInput,newInputLength, K, blocks, threads,maximum, minimum);
}
//free all used memory
free(h_bucketCount);
hipFree(elementToBucket); hipFree(d_bucketCount); hipFree(newInput); hipFree(count);
return kthValue;
}
/************************* BEGIN MAIN FUNCTIONS FOR RANDOMIZEDBLOCKEDBUCKETSELECT ************************/
/************************* BEGIN MAIN FUNCTIONS FOR RANDOMIZEDBLOCKEDBUCKETSELECT ************************/
/************************* BEGIN MAIN FUNCTIONS FOR RANDOMIZEDBLOCKEDBUCKETSELECT ************************/
/// ***********************************************************
/// ***********************************************************
/// **** MAIN FUNCTION
/// ***********************************************************
/// ***********************************************************
/* this function finds the kth-largest element from the input array */
/* Randomized first pass (HIP build): samples the input to build NUM_PIVOTS
 * pivots, distributes the elements into 4096 pivot-based buckets, and reduces
 * the problem to the single bucket containing the Kth-smallest element, which
 * is then finished by sorting (small), phaseTwo (medium), or a recursive
 * phaseOne (still above CUTOFF_POINT).
 *
 * d_vector       - device input; freed here only when pass > 0 and we recurse
 * K              - 1-based rank wanted (Kth smallest)
 * blocks/threads - launch configuration for the helper kernels
 * pass           - recursion depth, 0 on the initial call
 */
template <typename T>
T phaseOneR(T* d_vector, int length, int K, int blocks, int threads, int pass = 0){
/// ****STEP 1: Find Min and Max of the whole vector; a flat vector is trivial
T maximum, minimum;
thrust::device_ptr<T>dev_ptr(d_vector);
thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T> > result = thrust::minmax_element(dev_ptr, dev_ptr + length);
minimum = *result.first;
maximum = *result.second;
//if the max and the min are the same, then we are done
if(maximum == minimum){
return maximum;
}
//if we want the max or min just return it
if(K == 1){
return minimum;
}
if(K == length){
return maximum;
}
/// ****STEP 2: Declare variables and allocate memory
int threadsPerBlock = threads;
int numBlocks = blocks;
const int numBuckets = 4096; // const so h_bucketCount below is a standard (non-VLA) array
int offset = blocks * threads;
// variables for the randomized selection
const int numPivots = NUM_PIVOTS;
int sampleSize = MAX_THREADS_PER_BLOCK;
// pivot variables (host copies plus device mirrors)
double slopes[numPivots - 1];
double * d_slopes;
T pivots[numPivots];
T * d_pivots;
//Allocate memory to store bucket assignments
size_t size = length * sizeof(uint);
uint* d_elementToBucket; //array showing what bucket every element is in
CUDA_CALL(hipMalloc(&d_elementToBucket, size));
//Allocate memory to store bucket counts: one row of numBuckets counters per block
size_t totalBucketSize = numBlocks * numBuckets * sizeof(uint);
uint h_bucketCount[numBuckets]; //host copy of the summed bucket counts
uint * d_bucketCount;
CUDA_CALL(hipMalloc(&d_bucketCount, totalBucketSize));
// bucket counters
int kthBucket;
uint kthBucketScanner = 0;
// variable to store the end result
int newInputLength;
T* newInput;
T kthValue = 0;
/// ****STEP 3: Generate Pivots and Slopes
CUDA_CALL(hipMalloc(&d_slopes, (numPivots - 1) * sizeof(double)));
CUDA_CALL(hipMalloc(&d_pivots, numPivots * sizeof(T)));
//Find bucket sizes using a randomized selection
generatePivots<T>(pivots, slopes, d_vector, length, numPivots, sampleSize, numBuckets, minimum, maximum);
// zero out any infinite slope (two equal pivots, division by zero) so all
// values in that span project into a single bucket
for (int i = 0; i < numPivots - 1; i++)
if (isinf(slopes[i]))
slopes[i] = 0;
CUDA_CALL(hipMemcpy(d_slopes, slopes, (numPivots - 1) * sizeof(double), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_pivots, pivots, numPivots * sizeof(T), hipMemcpyHostToDevice));
/// ****STEP 4: Assign elements to buckets, then sum the per-block count rows
hipLaunchKernelGGL(( assignSmartBucket<T>), dim3(numBlocks), dim3(threadsPerBlock), numPivots * sizeof(T) + (numPivots-1) * sizeof(double) + numBuckets * sizeof(uint), 0, d_vector, length, numBuckets, d_slopes, d_pivots, numPivots, d_elementToBucket, d_bucketCount, offset);
hipLaunchKernelGGL(( sumCounts), dim3(numBuckets/threadsPerBlock), dim3(threadsPerBlock), 0, 0, d_bucketCount, numBuckets, numBlocks);
/// ****STEP 5: Find the kth bucket; K becomes the rank inside that bucket
kthBucket = findKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &kthBucketScanner, numBlocks);
newInputLength = h_bucketCount[kthBucket];
K -= kthBucketScanner;
printf("original kthBucketCount = %d\n", newInputLength);
/// ****STEP 6: Copy the kth bucket's elements into a compact array
CUDA_CALL(hipMalloc(&newInput, newInputLength * sizeof(T)));
hipLaunchKernelGGL(( copyElements<T>), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_vector, length, d_elementToBucket, kthBucket, newInput, offset, d_bucketCount, numBuckets);
//if we only copied one element, then we are done
if(newInputLength == 1){
thrust::device_ptr<T>new_ptr(newInput);
kthValue = new_ptr[0];
//free all used memory
hipFree(d_bucketCount);
hipFree(d_elementToBucket);
hipFree(d_pivots);
hipFree(d_slopes);
hipFree(newInput);
return kthValue;
}
//END OF FIRST PASS, NOW WE PROCEED TO SUBSEQUENT PASSES
//if the new length is greater than the CUTOFF, run the regular phaseOne again
if(newInputLength > CUTOFF_POINT && pass < 1){
if(pass > 0){
hipFree(d_vector);
}
//release intermediates before recursing, then return directly: the original
//fell through to the cleanup block below and freed these same pointers a
//SECOND time (double free)
hipFree(d_bucketCount);
hipFree(d_elementToBucket);
hipFree(d_pivots);
hipFree(d_slopes);
kthValue = phaseOne(newInput, newInputLength, K, blocks, threads,pass + 1);
//phaseOne at pass >= 1 does not free its input, so release it here
hipFree(newInput);
return kthValue;
}
else{
// find boundaries of kth bucket
// NOTE(review): if slopes[pivotIndex] was zeroed above, these divisions
// yield inf/NaN; confirm the resulting bounds are still safe for phaseTwo
int pivotOffset = numBuckets / (numPivots - 1);
int pivotIndex = kthBucket/pivotOffset;
int pivotInnerindex = kthBucket - pivotOffset * pivotIndex;
minimum = max(minimum, (T) (pivots[pivotIndex] + pivotInnerindex / slopes[pivotIndex]));
maximum = min(maximum, (T) (pivots[pivotIndex] + (pivotInnerindex+1) / slopes[pivotIndex]));
if (newInputLength<33000) {
//small remainder: sort it on the device and read off the Kth element
thrust::device_ptr<T>newInput_ptr(newInput);
thrust::sort(newInput_ptr, newInput_ptr + newInputLength);
hipMemcpy (&kthValue, newInput + K - 1, sizeof (T), hipMemcpyDeviceToHost);
} else
kthValue = phaseTwo(newInput,newInputLength, K, blocks, threads,maximum, minimum);
}
//free all used memory
hipFree(d_elementToBucket);
hipFree(d_bucketCount);
hipFree(d_slopes);
hipFree(d_pivots);
hipFree(newInput);
return kthValue;
}
/**************************************************************************/
/**************************************************************************/
//THIS IS THE RANDOMIZEDBUCKETSELECT FUNCTION WRAPPER THAT CHOOSES THE CORRECT
//VERSION OF BUCKET SELECT TO RUN BASED ON THE INPUT LENGTH
/**************************************************************************/
/* Public entry point: returns the Kth-LARGEST element of d_vector.
 * Converts K into an order statistic from the bottom, then dispatches:
 * inputs at or below CUTOFF_POINT go straight to the deterministic phaseTwo,
 * larger inputs start with the randomized phaseOneR. */
template <typename T>
T randomizedBucketSelectWrapper(T* d_vector, int length, int K, int blocks, int threads)
{
// translate "Kth largest" into "kth smallest"
const int kSmallest = length - K + 1;
// small problems skip the randomized sampling pass entirely
if (length <= CUTOFF_POINT)
return phaseTwo(d_vector, length, kSmallest, blocks, threads);
//printf("Call PhaseOneR in parent function.\n");
return phaseOneR(d_vector, length, kSmallest, blocks, threads);
}
}
| de861bf89e8897966253597927e5d77b1685e610.cu | /* Copyright 2011 Russel Steinbach, Jeffrey Blanchard, Bradley Gordon,
* and Toluwaloju Alabi
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/transform_reduce.h>
#include <thrust/random.h>
#include <thrust/functional.h>
namespace RandomizedBucketSelect{
using namespace std;
#define MAX_THREADS_PER_BLOCK 1024
#define CUTOFF_POINT 200000
#define NUM_PIVOTS 17
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
/// ***********************************************************
/// ***********************************************************
/// **** HELPER CPU FUNCTIONS
/// ***********************************************************
/// ***********************************************************
// Event pair and elapsed-time slot shared by the begin/end halves of timing().
cudaEvent_t start, stop;
float time;
/* Event-based GPU stopwatch. Call with selection == 0 to start a timing
 * interval; any other selection stops it, prints the elapsed milliseconds
 * labeled with `ind`, and destroys the events. Not reentrant: the globals
 * above hold exactly one interval at a time. */
void timing(int selection, int ind){
if(selection==0) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
}
else {
// fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
// the documented drop-in replacement with identical behavior
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time %d: %lf \n", ind, time);
}
}
/* Releases the scratch buffers shared by the selection routines:
 * h_c - host bucket-count array (malloc'd), d_k - device kth-value cell,
 * etb - device element-to-bucket assignments, bc - device bucket counts. */
template<typename T>
void cleanup(uint *h_c, T* d_k, int *etb, uint *bc){
free(h_c);
cudaFree(d_k);
cudaFree(etb);
cudaFree(bc);
}
// Zeroes `length` uint entries of a DEVICE vector via cudaMemset.
// (The previous comment said "on the host (CPU)", which was inaccurate —
// cudaMemset operates on device memory.)
void setToAllZero(uint* deviceVector, int length){
cudaMemset(deviceVector, 0, length * sizeof(uint));
}
/// ***********************************************************
/// ***********************************************************
/// **** HELPER GPU FUNCTIONS-KERNELS
/// ***********************************************************
/// ***********************************************************
//this function assigns elements to buckets based off of a randomized sampling of the elements in the vector
/* Kernel: assigns every element of d_vector to one of numBuckets buckets
 * defined by the sampled pivots/slopes, records the choice in elementToBucket,
 * and accumulates PER-BLOCK bucket counts into bucketCount (one row of
 * numBuckets counters per block; sumCounts later turns rows into prefix sums).
 * Launch contract (see phaseOneR): dynamic shared memory must hold
 * numPivots T's + (numPivots-1) doubles + numBuckets uints; offset is the
 * total grid stride (blocks * threads).
 * NOTE(review): the zero-init and write-back loops assume numBuckets is a
 * multiple of MAX_THREADS_PER_BLOCK and blockDim.x == MAX_THREADS_PER_BLOCK
 * (true for the 4096/1024 launch in phaseOneR) — confirm for other callers. */
template <typename T>
__global__ void assignSmartBucket(T * d_vector, int length, int numBuckets, double * slopes, T * pivots, int numPivots, uint* elementToBucket, uint* bucketCount, int offset){
int index = blockDim.x * blockIdx.x + threadIdx.x;
uint bucketIndex;
int threadIndex = threadIdx.x;
//variables in shared memory for fast access
__shared__ int sharedNumSmallBuckets;
if (threadIndex < 1)
sharedNumSmallBuckets = numBuckets / (numPivots-1);
// dynamic shared memory, partitioned as [counts | slopes | pivots]
extern __shared__ uint array[];
uint * sharedBuckets = (uint *)array;
double * sharedSlopes = (double *)&sharedBuckets[numBuckets];
T * sharedPivots = (T *)&sharedSlopes[numPivots-1];
/*
uint * sharedBuckets = (uint *)array;
double * sharedSlopes = (double *)&sharedBuckets[numBuckets];
T * sharedPivots = (T *)&sharedSlopes[numPivots-1];
// statically allocating the array gives faster results
__shared__ double sharedSlopes[NUM_PIVOTS-1];
__shared__ T sharedPivots[NUM_PIVOTS];
*/
// zero the shared bucket counters before accumulating into them
for (int i = 0; i < (numBuckets / MAX_THREADS_PER_BLOCK); i++)
if (threadIndex < numBuckets)
sharedBuckets[i * MAX_THREADS_PER_BLOCK + threadIndex] = 0;
// first numPivots threads stage the pivots/slopes into shared memory
if(threadIndex < numPivots) {
*(sharedPivots + threadIndex) = *(pivots + threadIndex);
if(threadIndex < numPivots-1)
sharedSlopes[threadIndex] = slopes[threadIndex];
}
// NOTE(review): plain syncthreads() (no underscores) must be an alias defined
// elsewhere in this project; standard CUDA spells it __syncthreads()
syncthreads();
//assigning elements to buckets and incrementing the bucket counts
if(index < length) {
int i;
// grid-stride loop: offset is the total number of launched threads
for(i = index; i < length; i += offset) {
T num = d_vector[i];
int minPivotIndex = 0;
int maxPivotIndex = numPivots-1;
int midPivotIndex;
// find the index of the pivot that is the greatest s.t. lower than or equal to num using binary search
//while (maxPivotIndex > minPivotIndex+1) {
for(int j = 1; j < numPivots - 1; j*=2) {
midPivotIndex = (maxPivotIndex + minPivotIndex) / 2;
if (num >= sharedPivots[midPivotIndex])
minPivotIndex = midPivotIndex;
else
maxPivotIndex = midPivotIndex;
}
// bucket = first sub-bucket of the pivot span + linear offset within the span
bucketIndex = (minPivotIndex * sharedNumSmallBuckets) + (int) ((num - sharedPivots[minPivotIndex]) * sharedSlopes[minPivotIndex]);
elementToBucket[i] = bucketIndex;
// hashmap implementation set[bucketindex]=add.i;
//bucketCount[blockIdx.x * numBuckets + bucketIndex]++;
atomicInc (sharedBuckets + bucketIndex, length);
}
}
syncthreads();
// flush this block's counters to its own row of the global count matrix
// (plain store, not atomicAdd — each block owns its row exclusively)
for (int i = 0; i < (numBuckets / MAX_THREADS_PER_BLOCK); i++)
if (threadIndex < numBuckets)
//atomicAdd(bucketCount + blockIdx.x * numBuckets + i * MAX_THREADS_PER_BLOCK + threadIndex, sharedBuckets[i * MAX_THREADS_PER_BLOCK + threadIndex]);
*(bucketCount + blockIdx.x * numBuckets + i * MAX_THREADS_PER_BLOCK + threadIndex) = *(sharedBuckets + i * MAX_THREADS_PER_BLOCK + threadIndex);
}
//this function assigns elements to buckets
/* Kernel: hashes each element of d_vector into one of bucketNumbers uniform
 * buckets over [minimum, minimum + bucketNumbers/slope), records the choice in
 * `bucket`, and atomically accumulates counts into the single global
 * bucketCount array. Dynamic shared memory: bucketNumbers uints.
 * NOTE(review): the init/flush loops assume bucketNumbers is a multiple of
 * 1024 and blockDim.x == 1024 — confirm against the phaseOne/phaseTwo launch. */
template <typename T>
__global__ void assignBucket(T* d_vector, int length, int bucketNumbers, double slope, double minimum, int* bucket, uint* bucketCount, int offset){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int bucketIndex;
extern __shared__ uint sharedBuckets[];
int index = threadIdx.x;
//variables in shared memory for fast access
__shared__ int sbucketNums;
__shared__ double sMin;
// every thread stores the same value — a benign same-value write race
sbucketNums = bucketNumbers;
sMin = minimum;
// zero the shared counters before accumulating
for(int i=0; i < (bucketNumbers/1024); i++)
if(index < bucketNumbers)
sharedBuckets[i*1024+index] = 0;
syncthreads();
//assigning elements to buckets and incrementing the bucket counts
if(idx < length) {
int i;
// grid-stride loop over the input
for(i=idx; i< length; i+=offset){
//calculate the bucketIndex for each element
bucketIndex = (d_vector[i] - sMin) * slope;
//if it goes beyond the number of buckets, put it in the last bucket
if(bucketIndex >= sbucketNums)
bucketIndex = sbucketNums - 1;
bucket[i] = bucketIndex;
atomicInc(&sharedBuckets[bucketIndex], length);
}
}
syncthreads();
// merge this block's shared counters into the global totals
for(int i=0; i < (bucketNumbers/1024); i++)
if(index < bucketNumbers)
atomicAdd(&bucketCount[i*1024+index], sharedBuckets[i*1024+index]);
}
//this function reassigns elements to buckets
/* Kernel: refinement step for phaseTwo. Elements previously OUTSIDE the kth
 * bucket are parked at sentinel index bucketNumbers+1 (never counted again);
 * elements INSIDE it are re-hashed over the narrowed [minimum, maximum] window
 * and counted. Dynamic shared memory: bucketNumbers uints; assumes
 * blockDim.x >= bucketNumbers for the init/flush steps. */
template <typename T>
__global__ void reassignBucket(T* d_vector, int *bucket, uint *bucketCount, const int bucketNumbers, const int length, const double slope, const double maximum, const double minimum, int offset, int Kbucket){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ uint sharedBuckets[];
int index = threadIdx.x;
int bucketIndex;
// zero the shared counters before accumulating
if(index < bucketNumbers){
sharedBuckets[index] =0;
}
syncthreads();
//assigning elements to buckets and incrementing the bucket counts
if (idx < length){
int i;
// grid-stride loop over the input
for(i=idx; i<length; i+=offset){
if(bucket[i] != Kbucket){
// element already eliminated: park it at an out-of-range index
bucket[i] = bucketNumbers+1;
}
else{
//calculate the bucketIndex for each element
bucketIndex = (d_vector[i] - minimum) * slope;
//if it goes beyond the number of buckets, put it in the last bucket
if(bucketIndex >= bucketNumbers){
bucketIndex = bucketNumbers - 1;
}
bucket[i] = bucketIndex;
atomicInc(&sharedBuckets[bucketIndex], length);
}
}
}
syncthreads();
// merge this block's shared counters into the global totals
if(index < bucketNumbers){
atomicAdd(&bucketCount[index], sharedBuckets[index]);
}
}
/* Host helper: finds the bucket containing the kth element. Copies counts from
 * the device one (or all) at a time and scans until the running total reaches k.
 * On return, *sum holds the cumulative count through the returned bucket
 * (i.e. including the kth bucket's own count).
 * NOTE(review): the loop condition uses bitwise `&` on two comparison results —
 * correct here since both operands are 0/1, but `&&` would be clearer. */
inline int FindKBucket(uint *d_counter, uint *h_counter, const int numBuckets, const int k, uint * sum){
// fetch the first bucket's count; often enough to answer without a full copy
cudaMemcpy (sum, d_counter, sizeof(uint), cudaMemcpyDeviceToHost);
int Kbucket = 0;
if (*sum<k){
// need to scan: pull the whole count array to the host
cudaMemcpy(h_counter, d_counter, numBuckets * sizeof(uint), cudaMemcpyDeviceToHost);
while ( (*sum<k) & (Kbucket<numBuckets-1)){
Kbucket++;
*sum += h_counter[Kbucket];
}
}
else{
// answer is bucket 0; keep h_counter[0] in sync for the caller
cudaMemcpy(h_counter, d_counter, sizeof(uint), cudaMemcpyDeviceToHost);
}
return Kbucket;
}
/* Host helper for the randomized path: reads the LAST row of the per-block
 * count matrix (which, after sumCounts, holds the grid-wide totals) and scans
 * for the bucket containing the kth element. On return, *sum holds the count
 * of elements strictly BELOW the returned bucket (unlike FindKBucket above,
 * which includes the kth bucket's own count).
 * NOTE(review): CUDA_CALL returns EXIT_FAILURE (1) from this function on a
 * memcpy error — the caller would treat it as bucket index 1; worth revisiting. */
inline int findKBucket(uint * d_bucketCount, uint * h_bucketCount, int numBuckets, int k, uint * sum, int numBlocks){
// index of the first counter in the last (fully-summed) row
int sumsRowIndex= numBuckets * (numBlocks-1);
/*
for(int j=0; j<numBuckets; j++)
CUDA_CALL(cudaMemcpy(h_bucketCount + j, d_bucketCount + sumsRowIndex + j, sizeof(uint), cudaMemcpyDeviceToHost));
*/
CUDA_CALL(cudaMemcpy(h_bucketCount, d_bucketCount + sumsRowIndex, sizeof(uint) * numBuckets, cudaMemcpyDeviceToHost));
int kBucket = 0;
uint scanner = h_bucketCount[0];
// advance until the running total covers rank k (bitwise & on 0/1 operands)
while ((scanner < k) & (kBucket < numBuckets - 1)) {
kBucket++;
scanner += h_bucketCount[kBucket];
}
// exclude the kth bucket's own count from the reported prefix
*(sum) = scanner - h_bucketCount[kBucket];
return kBucket;
}
/* Kernel: turns the per-block count matrix (numBlocks rows x numBuckets
 * columns) into running sums down each bucket's column, so that after it runs,
 * row j holds the totals of rows 0..j and the LAST row holds grid-wide totals
 * (consumed by findKBucket) while intermediate rows give each block its
 * exclusive write offset (consumed by copyElements). One thread per bucket. */
__global__ void sumCounts(uint * d_bucketCount, const int numBuckets, const int numBlocks) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int j=1; j<numBlocks; j++)
d_bucketCount[index + numBuckets*j] += d_bucketCount[index + numBuckets*(j-1)];
}
//copy elements in the kth bucket to a new array
/* Kernel: compacts every element assigned to `bucket` into newArray. Each
 * block reads its own summed count (its exclusive end offset within newArray,
 * established by sumCounts) into shared memory and fills its slice backwards
 * via atomicDec, so blocks write disjoint regions without global atomics.
 * Element order within newArray is not preserved. */
template <typename T>
__global__ void copyElements (T* d_vector, int length, uint* elementToBucket, const int bucket, T* newArray, uint offset, uint * d_bucketCount, int numTotalBuckets){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int threadIndex = threadIdx.x;
__shared__ uint sharedBucket;
__shared__ uint sharedBucketCount;
// thread 0 stages the target bucket and this block's write cursor
if(threadIndex < 1) {
sharedBucket = bucket;
sharedBucketCount = d_bucketCount[blockIdx.x * numTotalBuckets + bucket];
}
syncthreads();
if(idx < length) {
// grid-stride loop; offset is the total number of launched threads
for(int i=idx; i<length; i+=offset) {
if (elementToBucket[i] == sharedBucket)
//newArray[atomicDec(d_bucketCount + blockIdx.x * numTotalBuckets + temp, length)-1] = d_vector[i];
newArray[atomicDec(&sharedBucketCount, length) - 1] = d_vector[i];
}
}
}
//copy elements in the kth bucket to a new array
/* Kernel: simple compaction used by the deterministic path — every element
 * assigned to `bucket` is appended to newArray at a slot claimed via a single
 * global atomic counter (*count must be zeroed before launch). Output order
 * is nondeterministic. */
template <typename T>
__global__ void copyElement(T* d_vector, int length, int* elementToBucket, int bucket, T* newArray, uint* count, int offset){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < length){
// grid-stride loop; offset is the total number of launched threads
for(int i=idx; i<length; i+=offset)
//copy elements in the kth bucket to the new array
if(elementToBucket[i] == bucket)
newArray[atomicInc(count, length)] = d_vector[i];
}
}
/* Kernel: writes any element whose bucket equals Kbucket into Kvalue[0].
 * Multiple threads may store concurrently (unsynchronized last-writer-wins);
 * phaseTwo only launches this once the kth bucket holds a single DISTINCT
 * value, so every writer stores the same answer — presumably why the race is
 * tolerated (NOTE: verify that invariant if reusing this kernel elsewhere). */
template <typename T>
__global__ void GetKvalue(T* d_vector, int * d_bucket, const int Kbucket, const int n, T* Kvalue, int offset )
{
uint xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) {
int i;
// grid-stride loop; offset is the total number of launched threads
for(i=xIndex; i<n; i+=offset){
if ( d_bucket[i] == Kbucket )
Kvalue[0] = d_vector[i];
}
}
}
/// ***********************************************************
/// ***********************************************************
/// **** GENERATE PIVOTS
/// ***********************************************************
/// ***********************************************************
// 32-bit integer mix: avalanches the input so that consecutive keys yield
// uncorrelated outputs. Used to derive independent per-thread RNG seeds.
__host__ __device__
unsigned int hash(unsigned int key) {
    key += 0x7ed55d16 + (key << 12);
    key ^= 0xc761c23c ^ (key >> 19);
    key += 0x165667b1 + (key << 5);
    key = (key + 0xd3a2646c) ^ (key << 9);
    key += 0xfd7046c5 + (key << 3);
    key ^= 0xb55a4f09 ^ (key >> 16);
    return key;
}
/* Thrust functor mapping an index to a pseudo-random float in [0, 1).
 * Each call seeds its own engine from hash(index) * mainSeed, so parallel
 * invocations are independent; mainSeed varies per run (set from wall-clock
 * time by createRandomVector). */
struct RandomNumberFunctor :
public thrust::unary_function<unsigned int, float> {
// run-level seed mixed into every per-index seed
unsigned int mainSeed;
RandomNumberFunctor(unsigned int _mainSeed) :
mainSeed(_mainSeed) {}
__host__ __device__
float operator()(unsigned int threadIdx)
{
unsigned int seed = hash(threadIdx) * mainSeed;
thrust::default_random_engine rng(seed);
// decorrelate further by discarding index-many draws
rng.discard(threadIdx);
thrust::uniform_real_distribution<float> u(0, 1);
return u(rng);
}
};
/* Fills the device array d_vec with `size` pseudo-random values in [0, 1),
 * seeding from the current wall-clock time (so results differ per run and are
 * NOT reproducible — seed explicitly if determinism is needed). */
template <typename T>
void createRandomVector(T * d_vec, int size) {
timeval t1;
uint seed;
gettimeofday(&t1, NULL);
// microseconds * seconds: cheap per-run entropy for the functor's main seed
seed = t1.tv_usec * t1.tv_sec;
thrust::device_ptr<T> d_ptr(d_vec);
thrust::transform (thrust::counting_iterator<uint>(0),thrust::counting_iterator<uint>(size), d_ptr, RandomNumberFunctor(seed));
}
/* Kernel: in place, replaces each fractional value in `in` (expected in
 * [0, 1)) with the element of `list` at index floor(value * size) — i.e.
 * turns uniform randoms into a uniform sample of `list`. One thread per
 * entry of `in`; launch exactly as many threads as entries (no bounds check). */
template <typename T>
__global__ void enlargeIndexAndGetElements (T * in, T * list, int size) {
*(in + blockIdx.x*blockDim.x + threadIdx.x) = *(list + ((int) (*(in + blockIdx.x * blockDim.x + threadIdx.x) * size)));
}
/* Overload writing to a separate uint output: out[i] = list[floor(in[i]*size)],
 * sampling uint `list` by the fractional values in `in`. Same launch contract
 * as the in-place template version above (one thread per entry, no bounds check). */
__global__ void enlargeIndexAndGetElements (float * in, uint * out, uint * list, int size) {
*(out + blockIdx.x * blockDim.x + threadIdx.x) = (uint) *(list + ((int) (*(in + blockIdx.x * blockDim.x + threadIdx.x) * size)));
}
/* Builds numPivots pivots on the host — pivots[0]=min, pivots[numPivots-1]=max,
 * the rest drawn from a sorted random sample of sizeOfSample elements of
 * d_list — and, for each adjacent pivot pair, the slope that maps a value in
 * that span to one of its numSmallBuckets sub-buckets. Writes into the host
 * arrays `pivots` and `slopes`; equal pivots yield an infinite slope that the
 * caller (phaseOneR) zeroes out afterwards. */
template <typename T>
void generatePivots (T * pivots, double * slopes, T * d_list, int sizeOfVector, int numPivots, int sizeOfSample, int totalSmallBuckets, T min, T max) {
T * d_randoms;
// distance from the sample's ends at which the 2nd / 2nd-to-last pivots sit
int endOffset = 22;
int pivotOffset = (sizeOfSample - endOffset * 2) / (numPivots - 3);
int numSmallBuckets = totalSmallBuckets / (numPivots - 1);
cudaMalloc (&d_randoms, sizeof (T) * sizeOfSample);
createRandomVector (d_randoms, sizeOfSample);
// converts randoms floats into elements from necessary indices
enlargeIndexAndGetElements<<<(sizeOfSample/MAX_THREADS_PER_BLOCK), MAX_THREADS_PER_BLOCK>>>(d_randoms, d_list, sizeOfVector);
pivots[0] = min;
pivots[numPivots - 1] = max;
thrust::device_ptr<T>randoms_ptr(d_randoms);
thrust::sort(randoms_ptr, randoms_ptr + sizeOfSample);
// fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// documented drop-in replacement
cudaDeviceSynchronize();
// set the pivots which are endOffset away from the min and max pivots
cudaMemcpy (pivots + 1, d_randoms + endOffset - 1, sizeof (T), cudaMemcpyDeviceToHost);
cudaMemcpy (pivots + numPivots - 2, d_randoms + sizeOfSample - endOffset - 1, sizeof (T), cudaMemcpyDeviceToHost);
slopes[0] = numSmallBuckets / (double) (pivots[1] - pivots[0]);
// interior pivots are evenly spaced through the sorted sample
// (`register` dropped: deprecated in modern C++ and a no-op)
for (int i = 2; i < numPivots - 2; i++) {
cudaMemcpy (pivots + i, d_randoms + pivotOffset * (i - 1) + endOffset - 1, sizeof (T), cudaMemcpyDeviceToHost);
slopes[i - 1] = numSmallBuckets / (double) (pivots[i] - pivots[i - 1]);
}
slopes[numPivots - 3] = numSmallBuckets / (double) (pivots[numPivots - 2] - pivots[numPivots - 3]);
slopes[numPivots - 2] = numSmallBuckets / (double) (pivots[numPivots - 1] - pivots[numPivots - 2]);
cudaFree(d_randoms);
}
/************************************************************************/
/************************************************************************/
//THIS IS THE PHASE TWO FUNCTION WHICH WILL BE CALLED IF THE INPUT
//LENGTH IS LESS THAN THE CUTOFF OF 2MILLION 200 THOUSAND
/************************************************************************/
/************************************************************************/
/* Deterministic bucket-select phase (CUDA build). Finds the Kth-smallest
 * element of d_vector by repeatedly hashing values into 1024 uniform buckets
 * over [minValue, maxValue], locating the bucket holding the Kth value, and
 * shrinking the window to that bucket's span until it contains a single
 * distinct value. If maxValue == minValue on entry the bounds are recomputed
 * with thrust. The only behavioral code change from the original is replacing
 * the deprecated cudaThreadSynchronize() with cudaDeviceSynchronize(). */
template <typename T>
T phaseTwo(T* d_vector, int length, int K, int blocks, int threads, double maxValue = 0, double minValue = 0){
//declaring and initializing variables for kernel launches
int threadsPerBlock = threads;
int numBlocks = blocks;
int numBuckets = 1024;
int offset = blocks * threads;
// sum: cumulative count through the kth bucket; iter: safety cap on refinements
uint sum=0, Kbucket=0, iter=0;
int Kbucket_count = 0;
// a single block suffices when the input is smaller than one block's threads
if(length < 1024){
numBlocks = 1;
}
//variable to store the end result
T kthValue =0;
//declaring and initializing other variables
size_t size = length * sizeof(int);
size_t totalBucketSize = numBuckets * sizeof(uint);
//allocate memory to store bucket assignments and to count elements in buckets
int* elementToBucket;
uint* d_bucketCount;
cudaMalloc(&elementToBucket, size);
cudaMalloc(&d_bucketCount, totalBucketSize);
uint * h_bucketCount = (uint*)malloc(totalBucketSize);
// device scratch cell that GetKvalue writes the answer into
T* d_Kth_val;
cudaMalloc(&d_Kth_val, sizeof(T));
thrust::device_ptr<T>dev_ptr(d_vector);
//if max == min, then we know that it must not have had the values passed in.
if(maxValue == minValue){
thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T> > result = thrust::minmax_element(dev_ptr, dev_ptr + length);
minValue = *result.first;
maxValue = *result.second;
}
double slope = (numBuckets - 1)/(maxValue - minValue);
// if max equals min after the recompute, every element is identical — done
if(maxValue == minValue){
cleanup(h_bucketCount, d_Kth_val, elementToBucket,d_bucketCount);
return maxValue;
}
//make all entries of this vector equal to zero
setToAllZero(d_bucketCount, numBuckets);
//distribute elements to bucket
assignBucket<<<numBlocks, threadsPerBlock, numBuckets*sizeof(uint)>>>(d_vector, length, numBuckets, slope, minValue, elementToBucket, d_bucketCount, offset);
//find the bucket containing the kth element we want
Kbucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &sum);
Kbucket_count = h_bucketCount[Kbucket];
// refine until the kth bucket holds one distinct value (or 1000 iterations)
while ( (Kbucket_count > 1) && (iter < 1000)){
// shrink the window to the kth bucket's span: new min is the bucket's lower
// edge, new max is one bucket-width above the UPDATED min
minValue = max(minValue, minValue + Kbucket/slope);
maxValue = min(maxValue, minValue + 1/slope);
// K becomes the rank of the target inside the kth bucket
K = K - sum + Kbucket_count;
if ( maxValue - minValue > 0.0f ){
slope = (numBuckets - 1)/(maxValue-minValue);
setToAllZero(d_bucketCount, numBuckets);
// re-bucket only elements that were in the kth bucket; others are parked out of range
reassignBucket<<< numBlocks, threadsPerBlock, numBuckets * sizeof(uint) >>>(d_vector, elementToBucket, d_bucketCount, numBuckets,length, slope, maxValue, minValue, offset, Kbucket);
sum = 0;
Kbucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &sum);
Kbucket_count = h_bucketCount[Kbucket];
iter++;
}
else{
// window collapsed to a single value — that value is the answer
cleanup(h_bucketCount, d_Kth_val, elementToBucket, d_bucketCount);
return maxValue;
}
}
// the kth bucket now has one distinct value; fetch it from the device
GetKvalue<<<numBlocks, threadsPerBlock >>>(d_vector, elementToBucket, Kbucket, length, d_Kth_val, offset);
cudaMemcpy(&kthValue, d_Kth_val, sizeof(T), cudaMemcpyDeviceToHost);
// fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// documented drop-in replacement
cudaDeviceSynchronize();
cleanup(h_bucketCount, d_Kth_val, elementToBucket, d_bucketCount);
return kthValue;
}
/* this function finds the kth-largest element from the input array
 *
 * d_vector : device array of length elements (consumed; freed by the callee
 *            only on recursive passes, i.e. when pass > 0)
 * K        : order statistic to find (1 = minimum, length = maximum)
 * pass     : recursion depth; at most one recursive refinement is performed
 *
 * Fixes over the original: the scratch buffers (elementToBucket,
 * d_bucketCount, count) were freed both inside the recursion branch and again
 * after it (double cudaFree), and the host-side h_bucketCount was leaked.
 */
template <typename T>
T phaseOne(T* d_vector, int length, int K, int blocks, int threads, int pass = 0){
  //declaring variables for kernel launches
  int threadsPerBlock = threads;
  int numBlocks = blocks;
  int numBuckets = 1024;
  int offset = blocks * threads;
  int kthBucket, kthBucketCount;
  int newInputLength;
  int* elementToBucket; //array showing what bucket every element is in

  //declaring and initializing other variables
  uint *d_bucketCount, *count; //array showing the number of elements in each bucket
  uint kthBucketScanner = 0;
  size_t size = length * sizeof(int);

  //variable to store the end result
  T kthValue = 0;
  T* newInput;

  //find max and min with thrust
  double maximum, minimum;

  thrust::device_ptr<T> dev_ptr(d_vector);
  thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T> > result = thrust::minmax_element(dev_ptr, dev_ptr + length);

  minimum = *result.first;
  maximum = *result.second;

  //if the max and the min are the same, then we are done
  if(maximum == minimum){
    return maximum;
  }
  //if we want the max or min just return it
  if(K == 1){
    return minimum;
  }
  if(K == length){
    return maximum;
  }

  //Allocate memory to store bucket assignments
  CUDA_CALL(cudaMalloc(&elementToBucket, size));

  //Allocate memory to store bucket counts
  size_t totalBucketSize = numBuckets * sizeof(uint);
  CUDA_CALL(cudaMalloc(&d_bucketCount, totalBucketSize));
  uint* h_bucketCount = (uint*)malloc(totalBucketSize);

  //Calculate max-min
  double range = maximum - minimum;
  //Calculate the slope, i.e numBuckets/range, so values project onto [0, numBuckets)
  double slope = (numBuckets - 1)/range;

  cudaMalloc(&count, sizeof(uint));

  //Set the bucket count vector to all zeros
  setToAllZero(d_bucketCount, numBuckets);

  //Distribute elements into their respective buckets
  assignBucket<<<numBlocks, threadsPerBlock, numBuckets*sizeof(uint)>>>(d_vector, length, numBuckets, slope, minimum, elementToBucket, d_bucketCount, offset);
  kthBucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &kthBucketScanner);
  kthBucketCount = h_bucketCount[kthBucket];
  printf("original kthBucketCount = %d\n", kthBucketCount);

  //we must update K since we have reduced the problem size to elements in the kth bucket
  if(kthBucket != 0){
    K = kthBucketCount - (kthBucketScanner - K);
  }

  //copy elements in the kth bucket to a new array
  cudaMalloc(&newInput, kthBucketCount * sizeof(T));
  setToAllZero(count, 1);
  copyElement<<<numBlocks, threadsPerBlock>>>(d_vector, length, elementToBucket, kthBucket, newInput, count, offset);

  //store the length of the newly copied elements
  newInputLength = kthBucketCount;

  //if we only copied one element, then we are done
  if(newInputLength == 1){
    thrust::device_ptr<T> new_ptr(newInput);
    kthValue = new_ptr[0];

    //free all used memory (including the host bucket counts, which the
    //original code leaked)
    free(h_bucketCount);
    cudaFree(elementToBucket); cudaFree(d_bucketCount); cudaFree(count); cudaFree(newInput);
    return kthValue;
  }

  /*********************************************************************/
  //END OF FIRST PASS, NOW WE PROCEED TO SUBSEQUENT PASSES
  /*********************************************************************/

  //the scratch buffers are not needed past this point; free each exactly once
  //here (the original freed them inside the recursion branch AND after it,
  //producing double cudaFree calls on that path)
  free(h_bucketCount);
  cudaFree(elementToBucket); cudaFree(d_bucketCount); cudaFree(count);

  //if the new length is greater than the CUTOFF, run the regular phaseOne again
  if(newInputLength > CUTOFF_POINT && pass < 1){
    if(pass > 0){
      cudaFree(d_vector);
    }
    kthValue = phaseOne(newInput, newInputLength, K, blocks, threads, pass + 1);
  }
  else{
    //narrow the value range down to the kth bucket's boundaries
    minimum = max(minimum, minimum + kthBucket/slope);
    maximum = min(maximum, minimum + 1/slope);
    kthValue = phaseTwo(newInput, newInputLength, K, blocks, threads, maximum, minimum);
  }

  //free the copied kth-bucket elements
  cudaFree(newInput);
  return kthValue;
}
/************************* BEGIN MAIN FUNCTIONS FOR RANDOMIZEDBLOCKEDBUCKETSELECT ************************/
/************************* BEGIN MAIN FUNCTIONS FOR RANDOMIZEDBLOCKEDBUCKETSELECT ************************/
/************************* BEGIN MAIN FUNCTIONS FOR RANDOMIZEDBLOCKEDBUCKETSELECT ************************/
/// ***********************************************************
/// ***********************************************************
/// **** MAIN FUNCTION
/// ***********************************************************
/// ***********************************************************
/* this function finds the kth-largest element from the input array
 *
 * Randomized variant: buckets are sized from sampled pivots instead of a
 * single uniform slope.  Interface matches phaseOne.
 *
 * Fix over the original: the device scratch buffers (d_bucketCount,
 * d_elementToBucket, d_pivots, d_slopes) were freed both inside the recursion
 * branch and again at the end, so every pointer went through cudaFree twice
 * on that path.  They are now freed exactly once.
 */
template <typename T>
T phaseOneR(T* d_vector, int length, int K, int blocks, int threads, int pass = 0){
  /// ***********************************************************
  /// ****STEP 1: Find Min and Max of the whole vector
  /// ****We don't need to go through the rest of the algorithm if it's flat
  /// ***********************************************************
  T maximum, minimum;

  thrust::device_ptr<T> dev_ptr(d_vector);
  thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T> > result = thrust::minmax_element(dev_ptr, dev_ptr + length);

  minimum = *result.first;
  maximum = *result.second;

  //if the max and the min are the same, then we are done
  if(maximum == minimum){
    return maximum;
  }
  //if we want the max or min just return it
  if(K == 1){
    return minimum;
  }
  if(K == length){
    return maximum;
  }

  /// ***********************************************************
  /// ****STEP 2: Declare variables and allocate memory
  /// ***********************************************************
  //declaring variables for kernel launches
  int threadsPerBlock = threads;
  int numBlocks = blocks;
  int numBuckets = 4096;
  int offset = blocks * threads;

  // variables for the randomized selection
  int numPivots = NUM_PIVOTS;
  int sampleSize = MAX_THREADS_PER_BLOCK;

  // pivot variables (the host copies are re-used later to compute the kth
  // bucket's value range)
  double slopes[numPivots - 1];
  double * d_slopes;
  T pivots[numPivots];
  T * d_pivots;

  //Allocate memory to store bucket assignments
  size_t size = length * sizeof(uint);
  uint* d_elementToBucket; //array showing what bucket every element is in
  CUDA_CALL(cudaMalloc(&d_elementToBucket, size));

  //Allocate memory to store bucket counts (one slice per block, summed later)
  size_t totalBucketSize = numBlocks * numBuckets * sizeof(uint);
  uint h_bucketCount[numBuckets]; //array showing the number of elements in each bucket
  uint * d_bucketCount;
  CUDA_CALL(cudaMalloc(&d_bucketCount, totalBucketSize));

  // bucket counters
  int kthBucket;
  uint kthBucketScanner = 0;

  // variable to store the end result
  int newInputLength;
  T* newInput;
  T kthValue = 0;

  /// ***********************************************************
  /// ****STEP 3: Generate Pivots and Slopes
  /// ***********************************************************
  CUDA_CALL(cudaMalloc(&d_slopes, (numPivots - 1) * sizeof(double)));
  CUDA_CALL(cudaMalloc(&d_pivots, numPivots * sizeof(T)));

  //Find bucket sizes using a randomized selection
  generatePivots<T>(pivots, slopes, d_vector, length, numPivots, sampleSize, numBuckets, minimum, maximum);

  // make any slopes that were infinity due to division by zero (due to no
  // difference between the two associated pivots) into zero, so all the
  // values which use that slope are projected into a single bucket
  for (int i = 0; i < numPivots - 1; i++)
    if (isinf(slopes[i]))
      slopes[i] = 0;

  CUDA_CALL(cudaMemcpy(d_slopes, slopes, (numPivots - 1) * sizeof(double), cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(d_pivots, pivots, numPivots * sizeof(T), cudaMemcpyHostToDevice));

  /// ***********************************************************
  /// ****STEP 4: Assign elements to buckets
  /// ***********************************************************
  assignSmartBucket<T><<<numBlocks, threadsPerBlock, numPivots * sizeof(T) + (numPivots-1) * sizeof(double) + numBuckets * sizeof(uint)>>>(d_vector, length, numBuckets, d_slopes, d_pivots, numPivots, d_elementToBucket, d_bucketCount, offset);
  sumCounts<<<numBuckets/threadsPerBlock, threadsPerBlock>>>(d_bucketCount, numBuckets, numBlocks);

  /// ***********************************************************
  /// ****STEP 5: Find the kth bucket and its update index
  /// ***********************************************************
  kthBucket = findKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &kthBucketScanner, numBlocks);
  newInputLength = h_bucketCount[kthBucket];
  K -= kthBucketScanner;
  printf("original kthBucketCount = %d\n", newInputLength);

  /// ***********************************************************
  /// ****STEP 6: Copy the kth bucket's elements
  /// ***********************************************************
  CUDA_CALL(cudaMalloc(&newInput, newInputLength * sizeof(T)));
  copyElements<T><<<numBlocks, threadsPerBlock>>>(d_vector, length, d_elementToBucket, kthBucket, newInput, offset, d_bucketCount, numBuckets);

  // device scratch buffers are not needed past this point; free each exactly once
  cudaFree(d_bucketCount);
  cudaFree(d_elementToBucket);
  cudaFree(d_pivots);
  cudaFree(d_slopes);

  //if we only copied one element, then we are done
  if(newInputLength == 1){
    thrust::device_ptr<T> new_ptr(newInput);
    kthValue = new_ptr[0];

    cudaFree(newInput);
    return kthValue;
  }

  /*********************************************************************/
  //END OF FIRST PASS, NOW WE PROCEED TO SUBSEQUENT PASSES
  /*********************************************************************/

  //if the new length is greater than the CUTOFF, run the regular phaseOne again
  if(newInputLength > CUTOFF_POINT && pass < 1){
    if(pass > 0){
      cudaFree(d_vector);
    }
    // NOTE(review): this recurses into the uniform-bucket phaseOne rather
    // than phaseOneR -- confirm that is intentional.
    kthValue = phaseOne(newInput, newInputLength, K, blocks, threads, pass + 1);
  }
  else{
    // find boundaries of kth bucket from the host-side pivots/slopes
    int pivotOffset = numBuckets / (numPivots - 1);
    int pivotIndex = kthBucket/pivotOffset;
    int pivotInnerindex = kthBucket - pivotOffset * pivotIndex;
    minimum = max(minimum, (T) (pivots[pivotIndex] + pivotInnerindex / slopes[pivotIndex]));
    maximum = min(maximum, (T) (pivots[pivotIndex] + (pivotInnerindex+1) / slopes[pivotIndex]));

    // small remainders are cheapest to just sort outright
    if (newInputLength < 33000) {
      thrust::device_ptr<T> newInput_ptr(newInput);
      thrust::sort(newInput_ptr, newInput_ptr + newInputLength);
      cudaMemcpy(&kthValue, newInput + K - 1, sizeof(T), cudaMemcpyDeviceToHost);
    } else
      kthValue = phaseTwo(newInput, newInputLength, K, blocks, threads, maximum, minimum);
  }

  cudaFree(newInput);
  return kthValue;
}
/**************************************************************************/
/**************************************************************************/
//THIS IS THE RANDOMIZEDBUCKETSELECT FUNCTION WRAPPER THAT CHOOSES THE CORRECT
//VERSION OF BUCKET SELECT TO RUN BASED ON THE INPUT LENGTH
/**************************************************************************/
template <typename T>
T randomizedBucketSelectWrapper(T* d_vector, int length, int K, int blocks, int threads)
{
  // Convert "kth largest" into the equivalent "kth smallest" order statistic.
  K = length - K + 1;

  // Small inputs skip the bucketing pass entirely; larger inputs go through
  // the randomized bucketing phase first.
  if (length <= CUTOFF_POINT)
    return phaseTwo(d_vector, length, K, blocks, threads);

  //printf("Call PhaseOneR in parent function.\n");
  return phaseOneR(d_vector, length, K, blocks, threads);
}
}
|
2b30c2a0e641515d8e0eb0fe1a636131afc4e6e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
namespace quda {
QudaTune getBlasTuning();
QudaVerbosity getBlasVerbosity();
hipStream_t* getBlasStream();
namespace copy {
#include <texture.h>
static struct {
int x[QUDA_MAX_DIM];
int stride;
} blasConstants;
// Element-wise copy between two spinor-field accessors; the accessors perform
// any precision conversion inside load()/save().  Grid-stride loop, so any
// launch geometry covers all `length` sites.
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
  const unsigned int gridSize = gridDim.x * blockDim.x;
  for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += gridSize) {
    FloatN x[N];
    X.load(x, i);
    Y.save(x, i);
  }
}
// Tunable wrapper around copyKernel: holds the input/output accessors and
// lets the autotuner choose the launch geometry.
//
// Fix: the original text contained HTML-entity mojibake "¶m" (pilcrow + m)
// where "&param" belongs in two signatures below, which does not compile.
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {

private:
  Input &X;
  Output &Y;
  const int length;

  // copyKernel uses no shared memory
  int sharedBytesPerThread() const { return 0; }
  int sharedBytesPerBlock(const TuneParam &param) const { return 0; }

  virtual bool advanceSharedBytes(TuneParam &param) const
  {
    TuneParam next(param);
    advanceBlockDim(next); // to get next blockDim
    int nthreads = next.block.x * next.block.y * next.block.z;
    param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
      sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
    return false; // shared memory is never a tuning axis for this kernel
  }

public:
  CopyCuda(Output &Y, Input &X, int length) : X(X), Y(Y), length(length) { ; }
  virtual ~CopyCuda() { ; }

  TuneKey tuneKey() const {
    std::stringstream vol, aux;
    vol << blasConstants.x[0] << "x";
    vol << blasConstants.x[1] << "x";
    vol << blasConstants.x[2] << "x";
    vol << blasConstants.x[3];
    aux << "stride=" << blasConstants.stride << ",out_prec=" << Y.Precision() << ",in_prec=" << X.Precision();
    return TuneKey(vol.str(), "copyKernel", aux.str());
  }

  void apply(const hipStream_t &stream) {
    TuneParam tp = tuneLaunch(*this, getBlasTuning(), getBlasVerbosity());
    hipLaunchKernelGGL(( copyKernel<FloatN, N>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, Y, X, length);
  }

  void preTune() { ; } // no need to save state for copy kernels
  void postTune() { ; } // no need to restore state for copy kernels

  long long flops() const { return 0; }
  long long bytes() const {
    // bytes moved per site: (in + out precision) times internal real count;
    // half precision adds one float per site for the norm field
    const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
    size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
    if (X.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
    if (Y.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
    return bytes*length;
  }
};
// Copies src into dst with on-the-fly precision conversion.
// Same precision: plain device-to-device memcpy (plus the norm field for half
// precision).  Mixed precision: instantiate a CopyCuda conversion kernel for
// the (dst precision, src precision, nSpin) combination.
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
  if (&src == &dst) return; // aliasing fields

  if (src.Nspin() != 1 && src.Nspin() != 4) errorQuda("nSpin(%d) not supported\n", src.Nspin());

  // full-subset fields are copied as two independent parity halves
  if (dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
    copy::copyCuda(dst.Even(), src.Even());
    copy::copyCuda(dst.Odd(), src.Odd());
    return;
  }

  checkSpinorLength(dst, src);

  // stash volume/stride where tuneKey() can read them
  for (int d=0; d<QUDA_MAX_DIM; d++) blasConstants.x[d] = src.X()[d];
  blasConstants.stride = src.Stride();

  // For a given dst precision, there are two non-trivial possibilities for the
  // src precision.

  blas_bytes += src.RealLength()*((int)src.Precision() + (int)dst.Precision());

  if (dst.Precision() == src.Precision()) {
    hipMemcpy(dst.V(), src.V(), dst.Bytes(), hipMemcpyDeviceToDevice);
    if (dst.Precision() == QUDA_HALF_PRECISION) {
      // half precision also carries a per-site norm array
      hipMemcpy(dst.Norm(), src.Norm(), dst.NormBytes(), hipMemcpyDeviceToDevice);
      blas_bytes += 2*dst.RealLength()*sizeof(float);
    }
  } else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
    // single -> double
    if (src.Nspin() == 4){
      SpinorTexture<float4, float4, float4, 6, 0> src_tex(src);
      Spinor<float4, float2, double2, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float2, double2, 6>,
	SpinorTexture<float4, float4, float4, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //src.Nspin() == 1
      SpinorTexture<float2, float2, float2, 3, 0> src_tex(src);
      Spinor<float2, float2, double2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, double2, 3>,
	SpinorTexture<float2, float2, float2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
    // double -> single
    if (src.Nspin() == 4){
      SpinorTexture<float4, float2, double2, 6, 0> src_tex(src);
      Spinor<float4, float4, float4, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float4, float4, 6>,
	SpinorTexture<float4, float2, double2, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //src.Nspin() ==1
      SpinorTexture<float2, float2, double2, 3, 0> src_tex(src);
      Spinor<float2, float2, float2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, float2, 3>,
	SpinorTexture<float2, float2, double2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
    // half -> single (reads the extra norm array)
    blas_bytes += src.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<float4, float4, short4, 6, 0> src_tex(src);
      Spinor<float4, float4, float4, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float4, float4, 6>,
	SpinorTexture<float4, float4, short4, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin== 1;
      SpinorTexture<float2, float2, short2, 3, 0> src_tex(src);
      Spinor<float2, float2, float2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, float2, 3>,
	SpinorTexture<float2, float2, short2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
    // single -> half (writes the extra norm array)
    blas_bytes += dst.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<float4, float4, float4, 6, 0> src_tex(src);
      Spinor<float4, float4, short4, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float4, short4, 6>,
	SpinorTexture<float4, float4, float4, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin == 1
      SpinorTexture<float2, float2, float2, 3, 0> src_tex(src);
      Spinor<float2, float2, short2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, short2, 3>,
	SpinorTexture<float2, float2, float2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
    // half -> double
    blas_bytes += src.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<double2, float4, short4, 12, 0> src_tex(src);
      Spinor<double2, double2, double2, 12> dst_spinor(dst);
      CopyCuda<double2, 12, Spinor<double2, double2, double2, 12>,
	SpinorTexture<double2, float4, short4, 12, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin == 1
      SpinorTexture<double2, float2, short2, 3, 0> src_tex(src);
      Spinor<double2, double2, double2, 3> dst_spinor(dst);
      CopyCuda<double2, 3, Spinor<double2, double2, double2, 3>,
	SpinorTexture<double2, float2, short2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
    // double -> half
    blas_bytes += dst.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<double2, double2, double2, 12, 0> src_tex(src);
      Spinor<double2, double4, short4, 12> dst_spinor(dst);
      CopyCuda<double2, 12, Spinor<double2, double4, short4, 12>,
	SpinorTexture<double2, double2, double2, 12, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin == 1
      SpinorTexture<double2, double2, double2, 3, 0> src_tex(src);
      Spinor<double2, double2, short2, 3> dst_spinor(dst);
      CopyCuda<double2, 3, Spinor<double2, double2, short2, 3>,
	SpinorTexture<double2, double2, double2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else {
    errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
  }

  checkCudaError();
}
} // namespace copy
// Public entry point in namespace quda; forwards to the implementation in
// namespace copy, which handles site subsets and precision conversion.
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
  copy::copyCuda(dst, src);
}
} // namespace quda
| 2b30c2a0e641515d8e0eb0fe1a636131afc4e6e9.cu | #include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
namespace quda {
QudaTune getBlasTuning();
QudaVerbosity getBlasVerbosity();
cudaStream_t* getBlasStream();
namespace copy {
#include <texture.h>
static struct {
int x[QUDA_MAX_DIM];
int stride;
} blasConstants;
// Element-wise copy between two spinor-field accessors; the accessors perform
// any precision conversion inside load()/save().  Grid-stride loop, so any
// launch geometry covers all `length` sites.
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
  const unsigned int gridSize = gridDim.x * blockDim.x;
  for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += gridSize) {
    FloatN x[N];
    X.load(x, i);
    Y.save(x, i);
  }
}
// Tunable wrapper around copyKernel: holds the input/output accessors and
// lets the autotuner choose the launch geometry.
//
// Fix: the original text contained HTML-entity mojibake "¶m" (pilcrow + m)
// where "&param" belongs in two signatures below, which does not compile.
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {

private:
  Input &X;
  Output &Y;
  const int length;

  // copyKernel uses no shared memory
  int sharedBytesPerThread() const { return 0; }
  int sharedBytesPerBlock(const TuneParam &param) const { return 0; }

  virtual bool advanceSharedBytes(TuneParam &param) const
  {
    TuneParam next(param);
    advanceBlockDim(next); // to get next blockDim
    int nthreads = next.block.x * next.block.y * next.block.z;
    param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
      sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
    return false; // shared memory is never a tuning axis for this kernel
  }

public:
  CopyCuda(Output &Y, Input &X, int length) : X(X), Y(Y), length(length) { ; }
  virtual ~CopyCuda() { ; }

  TuneKey tuneKey() const {
    std::stringstream vol, aux;
    vol << blasConstants.x[0] << "x";
    vol << blasConstants.x[1] << "x";
    vol << blasConstants.x[2] << "x";
    vol << blasConstants.x[3];
    aux << "stride=" << blasConstants.stride << ",out_prec=" << Y.Precision() << ",in_prec=" << X.Precision();
    return TuneKey(vol.str(), "copyKernel", aux.str());
  }

  void apply(const cudaStream_t &stream) {
    TuneParam tp = tuneLaunch(*this, getBlasTuning(), getBlasVerbosity());
    copyKernel<FloatN, N><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(Y, X, length);
  }

  void preTune() { ; } // no need to save state for copy kernels
  void postTune() { ; } // no need to restore state for copy kernels

  long long flops() const { return 0; }
  long long bytes() const {
    // bytes moved per site: (in + out precision) times internal real count;
    // half precision adds one float per site for the norm field
    const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
    size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
    if (X.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
    if (Y.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
    return bytes*length;
  }
};
// Copies src into dst with on-the-fly precision conversion.
// Same precision: plain device-to-device memcpy (plus the norm field for half
// precision).  Mixed precision: instantiate a CopyCuda conversion kernel for
// the (dst precision, src precision, nSpin) combination.
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
  if (&src == &dst) return; // aliasing fields

  if (src.Nspin() != 1 && src.Nspin() != 4) errorQuda("nSpin(%d) not supported\n", src.Nspin());

  // full-subset fields are copied as two independent parity halves
  if (dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
    copy::copyCuda(dst.Even(), src.Even());
    copy::copyCuda(dst.Odd(), src.Odd());
    return;
  }

  checkSpinorLength(dst, src);

  // stash volume/stride where tuneKey() can read them
  for (int d=0; d<QUDA_MAX_DIM; d++) blasConstants.x[d] = src.X()[d];
  blasConstants.stride = src.Stride();

  // For a given dst precision, there are two non-trivial possibilities for the
  // src precision.

  blas_bytes += src.RealLength()*((int)src.Precision() + (int)dst.Precision());

  if (dst.Precision() == src.Precision()) {
    cudaMemcpy(dst.V(), src.V(), dst.Bytes(), cudaMemcpyDeviceToDevice);
    if (dst.Precision() == QUDA_HALF_PRECISION) {
      // half precision also carries a per-site norm array
      cudaMemcpy(dst.Norm(), src.Norm(), dst.NormBytes(), cudaMemcpyDeviceToDevice);
      blas_bytes += 2*dst.RealLength()*sizeof(float);
    }
  } else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
    // single -> double
    if (src.Nspin() == 4){
      SpinorTexture<float4, float4, float4, 6, 0> src_tex(src);
      Spinor<float4, float2, double2, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float2, double2, 6>,
	SpinorTexture<float4, float4, float4, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //src.Nspin() == 1
      SpinorTexture<float2, float2, float2, 3, 0> src_tex(src);
      Spinor<float2, float2, double2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, double2, 3>,
	SpinorTexture<float2, float2, float2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
    // double -> single
    if (src.Nspin() == 4){
      SpinorTexture<float4, float2, double2, 6, 0> src_tex(src);
      Spinor<float4, float4, float4, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float4, float4, 6>,
	SpinorTexture<float4, float2, double2, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //src.Nspin() ==1
      SpinorTexture<float2, float2, double2, 3, 0> src_tex(src);
      Spinor<float2, float2, float2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, float2, 3>,
	SpinorTexture<float2, float2, double2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
    // half -> single (reads the extra norm array)
    blas_bytes += src.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<float4, float4, short4, 6, 0> src_tex(src);
      Spinor<float4, float4, float4, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float4, float4, 6>,
	SpinorTexture<float4, float4, short4, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin== 1;
      SpinorTexture<float2, float2, short2, 3, 0> src_tex(src);
      Spinor<float2, float2, float2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, float2, 3>,
	SpinorTexture<float2, float2, short2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
    // single -> half (writes the extra norm array)
    blas_bytes += dst.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<float4, float4, float4, 6, 0> src_tex(src);
      Spinor<float4, float4, short4, 6> dst_spinor(dst);
      CopyCuda<float4, 6, Spinor<float4, float4, short4, 6>,
	SpinorTexture<float4, float4, float4, 6, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin == 1
      SpinorTexture<float2, float2, float2, 3, 0> src_tex(src);
      Spinor<float2, float2, short2, 3> dst_spinor(dst);
      CopyCuda<float2, 3, Spinor<float2, float2, short2, 3>,
	SpinorTexture<float2, float2, float2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
    // half -> double
    blas_bytes += src.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<double2, float4, short4, 12, 0> src_tex(src);
      Spinor<double2, double2, double2, 12> dst_spinor(dst);
      CopyCuda<double2, 12, Spinor<double2, double2, double2, 12>,
	SpinorTexture<double2, float4, short4, 12, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin == 1
      SpinorTexture<double2, float2, short2, 3, 0> src_tex(src);
      Spinor<double2, double2, double2, 3> dst_spinor(dst);
      CopyCuda<double2, 3, Spinor<double2, double2, double2, 3>,
	SpinorTexture<double2, float2, short2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
    // double -> half
    blas_bytes += dst.Volume()*sizeof(float);
    if (src.Nspin() == 4){
      SpinorTexture<double2, double2, double2, 12, 0> src_tex(src);
      Spinor<double2, double4, short4, 12> dst_spinor(dst);
      CopyCuda<double2, 12, Spinor<double2, double4, short4, 12>,
	SpinorTexture<double2, double2, double2, 12, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    } else { //nSpin == 1
      SpinorTexture<double2, double2, double2, 3, 0> src_tex(src);
      Spinor<double2, double2, short2, 3> dst_spinor(dst);
      CopyCuda<double2, 3, Spinor<double2, double2, short2, 3>,
	SpinorTexture<double2, double2, double2, 3, 0> >
	copy(dst_spinor, src_tex, src.Volume());
      copy.apply(*getBlasStream());
    }
  } else {
    errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
  }

  checkCudaError();
}
} // namespace copy
// Public entry point in namespace quda; forwards to the implementation in
// namespace copy, which handles site subsets and precision conversion.
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
  copy::copyCuda(dst, src);
}
} // namespace quda
|
72be7eddc8dc25618953a6b484c3546a8de78349.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Mesher.hpp"
#include "SparseOctree.hpp"
#include "Intersection.cuh"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#define WorkGroupSize (256)
#define MaxMesherBatchSize (WorkGroupSize * 40000ULL)
#define MaxVertexBatchSize (50000)
#define EpsTreshold (0.0001f)
#define xBitPattern (0x249249249249249ULL) // only 20 levels
#define yBitPattern (xBitPattern << 1)
#define zBitPattern (xBitPattern << 2)
#define xFillPattern (~xBitPattern)
#define yFillPattern (~yBitPattern)
#define zFillPattern (~zBitPattern)
#define Voxel_xPlus(v) ((((v | xFillPattern) + 0b001) & xBitPattern) | (v & yBitPattern) | (v & zBitPattern))
#define Voxel_xMinus(v) ((((v & xBitPattern) - 0b001) & xBitPattern) | (v & yBitPattern) | (v & zBitPattern))
#define Voxel_yPlus(v) ((((v | yFillPattern) + 0b010) & yBitPattern) | (v & xBitPattern) | (v & zBitPattern))
#define Voxel_yMinus(v) ((((v & yBitPattern) - 0b010) & yBitPattern) | (v & xBitPattern) | (v & zBitPattern))
#define Voxel_zPlus(v) ((((v | zFillPattern) + 0b100) & zBitPattern) | (v & xBitPattern) | (v & yBitPattern))
#define Voxel_zMinus(v) ((((v & zBitPattern) - 0b100) & zBitPattern) | (v & xBitPattern) | (v & yBitPattern))
using namespace glm;
using namespace std;
using namespace thrust;
using namespace UltraLod;
struct Vec3Less // Doesn't work if this is in anonymous struct
{
    // Epsilon-tolerant lexicographic (x, y, z) ordering: components within
    // EpsTreshold of each other are treated as equal and comparison falls
    // through to the next axis.
    // NOTE(review): a tolerance-based "less" is not a strict weak ordering
    // (near-equality is not transitive), which thrust::sort formally requires;
    // confirm the data's spacing makes this safe in practice.
    __device__ __host__
    inline bool operator()(const vec3& v0, const vec3& v1) const
    {
        auto xd = v0.x - v1.x;
        auto yd = v0.y - v1.y;
        auto zd = v0.z - v1.z;

        if (xd < -EpsTreshold)     return true;
        else if (xd > EpsTreshold) return false;

        if (yd < -EpsTreshold)     return true;
        else if (yd > EpsTreshold) return false;

        if (zd < -EpsTreshold)     return true;
        else if (zd > EpsTreshold) return false;

        return false;
    }
};
struct Vec3Equals
{
    // Component-wise equality with an EpsTreshold tolerance on each axis.
    // NOTE(review): tolerance-based equality is not transitive, so
    // deduplication results (e.g. via thrust::unique) can depend on element
    // order -- confirm this is acceptable for the intended use.
    __device__ __host__
    inline bool operator()(const vec3& v0, const vec3& v1) const
    {
        return
            fabsf(v0.x - v1.x) <= EpsTreshold &&
            fabsf(v0.y - v1.y) <= EpsTreshold &&
            fabsf(v0.z - v1.z) <= EpsTreshold;
            // exact comparison kept from the original for reference:
            //v0.x == v1.x &&
            //v0.y == v1.y &&
            //v0.z == v1.z;
    }
};
namespace
{
struct FaceCountConversion
{
    // Maps a per-voxel face bitmask to the number of set bits, i.e. the
    // number of exposed faces.
    __device__
    inline int operator()(uint8_t face) const
    {
        const uint32_t mask = (uint32_t)face;
        return __popc(mask);
    }
};
struct ShrinkToDepth
{
public:
    ShrinkToDepth(int depthReduce)
        : m_depthReduce(depthReduce)
    { }

    // Clears the flag bit (bit 63), then drops 3 key bits per level of depth
    // reduction, yielding the voxel's key at the coarser depth.
    __device__ __host__
    inline Voxel operator()(const Voxel& voxel) const
    {
        const Voxel withoutFlag = voxel & ~(1ULL << 63);
        return withoutFlag >> (3 * m_depthReduce);
    }

private:
    int m_depthReduce;
};
// Binary search over a sorted (ascending) voxel array; returns true when
// `voxel` is present, i.e. the cell is solid.
__device__ __host__
inline bool SolidVoxel(const Voxel* data, int len, const Voxel& voxel)
{
    int lo = 0;
    int hi = len - 1;

    while (lo <= hi)
    {
        // overflow-safe midpoint: (lo + hi) / 2 can wrap for very large arrays
        int mid = lo + (hi - lo) / 2;

        if (data[mid] > voxel)
            hi = mid - 1;
        else if (data[mid] < voxel)
            lo = mid + 1;
        else
            return true;
    }

    return false;
}
// Computes the world-space AABB of a voxel from its bit-interleaved key:
// the x/y/z coordinate bits are de-interleaved, scaled by voxelSize, and
// offset by the root bounds' minimum corner.
__device__ __host__
inline Aabb GetVoxelBounds(const Voxel& voxel, const Aabb& rootBounds, float voxelSize)
{
    // De-interleave the key into integer cell coordinates (21 bits per axis).
    uint64_t xi = 0, yi = 0, zi = 0;

    for (int bit = 0; bit < 21; bit++)
    {
        const uint64_t m = 1ULL << bit;
        xi |= (voxel >> (2 * bit + 0)) & m;
        yi |= (voxel >> (2 * bit + 1)) & m;
        zi |= (voxel >> (2 * bit + 2)) & m;
    }

    // Same float-operation order as before: scale, extend, then translate.
    vec3 lower = vec3((int)xi, (int)yi, (int)zi) * voxelSize;
    vec3 upper = lower + vec3(voxelSize, voxelSize, voxelSize);

    lower += rootBounds.min;
    upper += rootBounds.min;

    return { lower, upper };
}
__global__
void kernelCreateAdjacencyInfo(const Voxel* voxels, int count, uint8_t* outAdjacency)
{
    // One thread per voxel: writes a 6-bit face mask where a set bit means
    // the neighbor on that side is NOT solid (the face is exposed).
    // Bit order: 0 = -x, 1 = +x, 2 = -y, 3 = +y, 4 = -z, 5 = +z.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (!(idx < count))
        return;

    auto& voxel = voxels[idx];

    // Skip this if not solid (shouldn't be possible though)
    // NOTE(review): when this fires, outAdjacency[idx] is left unwritten --
    // presumably the buffer is zero-initialized by the caller; confirm.
    if (!SolidVoxel(voxels, count, voxel))
        return;

    uint8_t adjacency = 0;

    // Check each side for solid voxels (the Voxel_* macros step one cell
    // along one axis directly on the interleaved key)
    adjacency |= (!SolidVoxel(voxels, count, Voxel_xMinus(voxel)) << 0);
    adjacency |= (!SolidVoxel(voxels, count, Voxel_xPlus(voxel))  << 1);
    adjacency |= (!SolidVoxel(voxels, count, Voxel_yMinus(voxel)) << 2);
    adjacency |= (!SolidVoxel(voxels, count, Voxel_yPlus(voxel))  << 3);
    adjacency |= (!SolidVoxel(voxels, count, Voxel_zMinus(voxel)) << 4);
    adjacency |= (!SolidVoxel(voxels, count, Voxel_zPlus(voxel))  << 5);

    outAdjacency[idx] = adjacency;
}
// One thread per quad face: expands the face's 4 mapped vertex indices into
// 2 triangles (6 indices) with winding (0,2,1) and (2,0,3).
__global__
void kernelCreateTriangleIndices(const int* vertexMap, int faceCount, int* outIndices)
{
    const int face = blockIdx.x * blockDim.x + threadIdx.x;

    if (face >= faceCount)
        return;

    const int* quad = vertexMap + face * 4;
    int*       tri  = outIndices + face * 6;

    tri[0] = quad[0];
    tri[1] = quad[2];
    tri[2] = quad[1];
    tri[3] = quad[2];
    tri[4] = quad[0];
    tri[5] = quad[3];
}
__global__
void kernelSampleClosestDistances(
    const SparseOctree::Node* nodes,
    Aabb treeBounds,
    const vec3* vertices,
    int vertexCount,
    int sampleCount,
    float voxelSize,
    float sampleDist,
    vec3* outClosestPoints,
    float* outClosestDistances)
{
    // Thread layout: x indexes the vertex, y indexes the sample ray, and the
    // outputs are laid out as [vertex * sampleCount + sample].
    // NOTE(review): this assumes blockDim.y == sampleCount -- confirm at the
    // launch site.
    int vIdx = blockIdx.x * blockDim.x + threadIdx.x;

    if (!(vIdx < vertexCount))
        return;

    int sIdx = threadIdx.y;
    int outIdx = vIdx * sampleCount + sIdx;

    // Get vertex to test
    auto& vertex = vertices[vIdx];

    // Use halton sequence to determine ray direction
    // TODO: This should be uniform distribution over unit sphere!
    vec3 dir =
    {
        Halton<3>(sIdx) * 2.0f - 1.0f,
        Halton<7>(sIdx) * 2.0f - 1.0f,
        Halton<11>(sIdx) * 2.0f - 1.0f
    };
    dir = Normalize(dir);

    // Shoot the ray!  tMax is inspected after the call, so NodeRayCast
    // presumably updates it in place with the hit distance -- confirm.
    ColorRGB24 outColor; // dummy
    float tMin = 0.0f;
    float tMax = sampleDist;

    NodeRayCast(nodes, nullptr, vertex, dir, tMin, tMax, outColor, treeBounds);

    // Store result if collision happened
    if (tMax < sampleDist)
    {
        auto np = vertex + dir * tMax;

        // Make sure vertex is not moved too much: clamp the candidate point
        // to a +/- voxelSize box around the original vertex
        np.x = __min(__max(np.x, vertex.x - voxelSize), vertex.x + voxelSize);
        np.y = __min(__max(np.y, vertex.y - voxelSize), vertex.y + voxelSize);
        np.z = __min(__max(np.z, vertex.z - voxelSize), vertex.z + voxelSize);

        outClosestPoints[outIdx] = np;
        outClosestDistances[outIdx] = tMax;
    }
    else
        outClosestDistances[outIdx] = FLT_MAX; // sentinel: no hit within sampleDist
}
__global__
void kernelFilterClosestDistance(const vec3* closestPoints, const float* closestDistances, int vertexCount, int sampleCount, vec3* outPoints)
{
    // One thread per vertex: scan its sampleCount candidates and keep the
    // nearest hit. Vertices with no hit at all (every distance == FLT_MAX)
    // are left untouched, so they retain whatever position outPoints held.
    int vertex = blockIdx.x * blockDim.x + threadIdx.x;
    if (vertex >= vertexCount)
        return;
    int base = vertex * sampleCount;
    float bestDist = FLT_MAX;
    vec3 bestPoint = vec3(0, 0, 0);
    for (int s = 0; s < sampleCount; s++)
    {
        float d = closestDistances[base + s];
        if (d < bestDist)
        {
            bestDist = d;
            // Remember the candidate belonging to this distance
            bestPoint = closestPoints[base + s];
        }
    }
    if (bestDist < FLT_MAX)
        outPoints[vertex] = bestPoint;
}
// One thread per voxel: writes 4 corner vertices (one quad) for every face
// set in its adjacency mask. `faceIndices` is the exclusive prefix sum of
// per-voxel face counts, so voxel idx writes into the contiguous slice
// starting at outVertices[faceIndices[idx] * 4]. The quad corner order here
// is the one consumed by kernelCreateTriangleIndices (triangles 0-2-1, 2-0-3).
__global__
void kernelTriangulate(const Voxel* voxels, const uint8_t* adjacency, const int* faceIndices, int count, Aabb bounds, float voxelSize, vec3* outVertices)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (!(idx < count))
return;
// Get voxel information
auto& voxel = voxels[idx];
auto startIdx = faceIndices[idx];
auto faces = adjacency[idx];
auto faceCount = __popc(faces);
auto vertIdx = startIdx * 4;
if (!faceCount)
return;
// Evaluate voxel bounds
auto voxelBounds = GetVoxelBounds(voxel, bounds, voxelSize);
auto& min = voxelBounds.min;
auto& vs = voxelSize;
// NOTE(review): AppendTri is never used in this kernel (and AppendIdx is
// never defined); triangle indices are produced by kernelCreateTriangleIndices.
#define AppendTri() AppendIdx(0); AppendIdx(1); AppendIdx(2); AppendIdx(2); AppendIdx(3); AppendIdx(0);
#define AppendVert(x, y, z) (outVertices[vertIdx++] = min + vec3(x, y, z))
// Go through each face; bit order matches kernelCreateAdjacencyInfo
// (0:-x 1:+x 2:-y 3:+y 4:-z 5:+z)
if (faces & 0b000001) // xm
{
AppendVert(0, 0, vs);
AppendVert(0, 0, 0);
AppendVert(0, vs, 0);
AppendVert(0, vs, vs);
}
if (faces & 0b000010) // xp
{
AppendVert(vs, 0, 0);
AppendVert(vs, 0, vs);
AppendVert(vs, vs, vs);
AppendVert(vs, vs, 0);
}
if (faces & 0b000100) // ym
{
AppendVert(0, 0, vs);
AppendVert(vs, 0, vs);
AppendVert(vs, 0, 0);
AppendVert(0, 0, 0);
}
if (faces & 0b001000) // yp
{
AppendVert(0, vs, 0);
AppendVert(vs, vs, 0);
AppendVert(vs, vs, vs);
AppendVert(0, vs, vs);
}
if (faces & 0b010000) // zm
{
AppendVert(0, 0, 0);
AppendVert(vs, 0, 0);
AppendVert(vs, vs, 0);
AppendVert(0, vs, 0);
}
if (faces & 0b100000) // zp
{
AppendVert(vs, 0, vs);
AppendVert(0, 0, vs);
AppendVert(0, vs, vs);
AppendVert(vs, vs, vs);
}
}
#undef AppendVert
#undef AppendTri
#undef AppendIdx
}
namespace UltraLod
{
// CudaMesher
//
// GPU implementation of the voxel meshing pipeline: collapses octree leaf
// voxels to a coarser depth, detects exposed faces, triangulates them and
// snaps the generated vertices back toward the original surface.
class CudaMesher
{
public:
CudaMesher(const SparseOctree& tree, const vector<Voxel>& voxels, vector<vec3>& outVertices, vector<int>& outIndices);
// Runs the whole pipeline; no-op when treeDepth >= the tree's depth.
// NOTE(review): the parameter is named targetDepth in the definition.
void Meshify(int treeDepth);
private:
// Builds m_dAdjacentVoxels (6-bit exposed-face mask per solid voxel).
void GenerateAdjacencyInfo();
// Fills m_solidVoxels / m_dSolidVoxels with unique voxels at targetDepth.
void GenerateSolidVoxels(int targetDepth);
// Emits deduplicated vertices (m_dVertices) and triangle indices.
void GenerateVoxelFaceGeometry(int depth);
// Moves vertices toward the original surface and fills m_outVertices.
void MinimizePositionError();
//void GenerateSolidVoxels(int targetDepth, int startDepth, Voxel parentVoxel);
private:
const SparseOctree& m_tree; // source octree (host)
const vector<Voxel>& m_voxels; // original leaf voxels (host)
device_vector<Voxel> m_dSolidVoxels; // solid voxels at target depth (device)
device_vector<uint8_t> m_dAdjacentVoxels; // exposed-face mask per voxel (device)
device_vector<vec3> m_dVertices; // deduplicated mesh vertices (device)
vector<Voxel> m_solidVoxels; // solid voxels at target depth (host)
float m_voxelSize; // voxel edge length at target depth
vector<vec3>& m_outVertices; // caller-owned output vertices
vector<int>& m_outIndices; // caller-owned output indices
};
// Stores references to the inputs/outputs; no work happens until Meshify().
CudaMesher::CudaMesher(const SparseOctree& tree, const vector<Voxel>& voxels, vector<vec3>& outVertices, vector<int>& outIndices)
: m_tree(tree)
, m_voxels(voxels)
, m_outVertices(outVertices)
, m_outIndices(outIndices)
{ }
// Pipeline entry point. targetDepth must be strictly less than the octree's
// depth; otherwise (or when no solid voxels survive the collapse) this
// returns early and the output containers are left untouched.
void CudaMesher::Meshify(int targetDepth)
{
// Validate depth
auto treeDepth = m_tree.GetDepth();
if (targetDepth >= treeDepth)
return;
// Generate solid voxels at the (coarser) target depth
GenerateSolidVoxels(targetDepth);
if (m_solidVoxels.size() == 0)
return;
// Generate adjacency information (check all sides for solid voxels)
GenerateAdjacencyInfo();
// Generate voxel face geometry
GenerateVoxelFaceGeometry(targetDepth);
// Finally try to minimize error of generated geometry by moving them closer to original surface
MinimizePositionError();
}
// Computes the per-voxel exposed-face bitmask into m_dAdjacentVoxels.
// Precondition: m_solidVoxels is sorted ascending and duplicate-free
// (GenerateSolidVoxels guarantees this); the kernel binary-searches it.
void CudaMesher::GenerateAdjacencyInfo()
{
auto voxelCount = (int)m_solidVoxels.size();
assert(voxelCount > 0);
// Instantiate voxel bit flags
m_dAdjacentVoxels.resize(voxelCount);
thrust::fill(m_dAdjacentVoxels.begin(), m_dAdjacentVoxels.end(), 0);
// Load voxels to gpu (although they should already be there)
m_dSolidVoxels.resize(voxelCount);
ThrowIfFailed(hipMemcpy(m_dSolidVoxels.data().get(), m_solidVoxels.data(), voxelCount * sizeof(Voxel), hipMemcpyHostToDevice));
// Launch the kernel! Grid is the ceiling of voxelCount / WorkGroupSize.
auto workCount = NextMultipleOf<int, WorkGroupSize>(voxelCount);
auto groupCount = workCount / WorkGroupSize;
hipLaunchKernelGGL(( kernelCreateAdjacencyInfo), dim3((int)groupCount), dim3(WorkGroupSize), 0, 0,
m_dSolidVoxels.data().get(),
voxelCount,
m_dAdjacentVoxels.data().get());
ThrowIfFailed(hipGetLastError());
ThrowIfFailed(hipDeviceSynchronize());
}
// Collapses the tree's leaf voxels to `targetDepth` and stores the unique,
// ascending-sorted result in m_solidVoxels (also resident in m_dSolidVoxels).
// Voxels are processed in MaxMesherBatchSize chunks to bound vram use; when
// more than one batch ran, a final device-side sort+unique pass removes
// duplicates that straddle batch borders.
void CudaMesher::GenerateSolidVoxels(int targetDepth)
{
auto depthDiff = m_tree.GetDepth() - targetDepth;
assert(m_tree.GetDepth() > targetDepth);
// Evaluate voxel size of generated mesh.
// NOTE(review): uses Size().x only -- assumes cubic root bounds; confirm.
m_voxelSize = m_tree.GetBounds().Size().x / (1 << targetDepth);
// Init gpu variables
m_dSolidVoxels.resize(::min(m_voxels.size(), MaxMesherBatchSize));
// Go through voxels as batches
uint64_t startIdx = 0;
int batchCount = 0;
while (startIdx < m_voxels.size())
{
// Evaluate batch size
auto batchSize = ::min(MaxMesherBatchSize, m_voxels.size() - startIdx);
// Copy voxels of this batch to gpu
ThrowIfFailed(hipMemcpy(m_dSolidVoxels.data().get(), m_voxels.data() + startIdx, batchSize * sizeof(Voxel), hipMemcpyHostToDevice));
// Shrink them to the target depth (drops flag bit, truncates Morton key)
thrust::transform(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + batchSize, m_dSolidVoxels.begin(), ShrinkToDepth(depthDiff));
// Get unique only
thrust::stable_sort(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + batchSize);
auto newEnd = thrust::unique(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + batchSize);
// Copy data back to cpu, appending to what earlier batches produced
auto solidVoxelCount = newEnd - m_dSolidVoxels.begin();
auto prevCount = m_solidVoxels.size();
m_solidVoxels.resize(m_solidVoxels.size() + solidVoxelCount);
ThrowIfFailed(hipMemcpy(m_solidVoxels.data() + prevCount, m_dSolidVoxels.data().get(), solidVoxelCount * sizeof(Voxel), hipMemcpyDeviceToHost));
// Update start index for the next batch
startIdx += batchSize;
batchCount++;
}
if (batchCount > 1)
{
// Load all back to gpu for final sort (cross-batch duplicates remain)
assert(m_solidVoxels.size() <= MaxMesherBatchSize);
auto solidVoxelCount = ::min(m_solidVoxels.size(), MaxMesherBatchSize);
if (m_dSolidVoxels.size() < solidVoxelCount)
m_dSolidVoxels.resize(solidVoxelCount);
ThrowIfFailed(hipMemcpy(m_dSolidVoxels.data().get(), m_solidVoxels.data(), solidVoxelCount * sizeof(Voxel), hipMemcpyHostToDevice));
// Get unique solid voxels
thrust::stable_sort(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + solidVoxelCount);
auto newEnd = thrust::unique(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + solidVoxelCount);
// Copy data back to cpu
solidVoxelCount = newEnd - m_dSolidVoxels.begin();
m_solidVoxels.resize(solidVoxelCount);
ThrowIfFailed(hipMemcpy(m_solidVoxels.data(), m_dSolidVoxels.data().get(), solidVoxelCount * sizeof(Voxel), hipMemcpyDeviceToHost));
}
}
// Builds quad geometry for every exposed voxel face, deduplicates shared
// corner vertices on the GPU and writes the triangle index buffer into
// m_outIndices. Vertices stay resident in m_dVertices so that
// MinimizePositionError() can keep refining them in vram.
void CudaMesher::GenerateVoxelFaceGeometry(int depth)
{
auto voxelCount = m_solidVoxels.size();
assert(voxelCount > 0);
assert(voxelCount == m_dAdjacentVoxels.size());
// Total face count = sum of popcounts of the per-voxel adjacency masks
auto faceCount = thrust::transform_reduce(m_dAdjacentVoxels.begin(), m_dAdjacentVoxels.begin() + voxelCount, FaceCountConversion(), 0, thrust::plus<int>());
// Exclusive prefix sum of per-voxel face counts -> each voxel's first face slot.
// BUGFIX: this scratch buffer receives one entry per *voxel*, so it must be
// sized voxelCount; it was previously sized faceCount, which writes out of
// bounds whenever faceCount < voxelCount (e.g. many fully enclosed voxels).
device_vector<int> voxelFaceStartIndices(voxelCount);
thrust::transform(m_dAdjacentVoxels.begin(), m_dAdjacentVoxels.end(), voxelFaceStartIndices.begin(), FaceCountConversion());
thrust::exclusive_scan(voxelFaceStartIndices.begin(), voxelFaceStartIndices.begin() + voxelCount, voxelFaceStartIndices.begin(), 0, thrust::plus<int>());
// Allocate memory for output geometry (4 corner vertices per quad face)
device_vector<vec3> dOrigVertices(faceCount * 4);
// Triangulate faces!
auto groupCount = (int)(NextMultipleOf<uint64_t, WorkGroupSize>(voxelCount) / WorkGroupSize);
auto& bounds = m_tree.GetBounds();
auto voxelSize = bounds.Size().x / (1 << depth);
hipLaunchKernelGGL(( kernelTriangulate), dim3(groupCount), dim3(WorkGroupSize), 0, 0,
m_dSolidVoxels.data().get(),
m_dAdjacentVoxels.data().get(),
voxelFaceStartIndices.data().get(),
(int)voxelCount,
m_tree.GetBounds(),
voxelSize,
dOrigVertices.data().get());
ThrowIfFailed(hipGetLastError());
ThrowIfFailed(hipDeviceSynchronize());
// Deduplicate vertices: sort gathers equal positions together, unique drops
// the copies (Vec3Less / Vec3Equals compare with an epsilon tolerance)
m_dVertices = dOrigVertices;
thrust::sort(m_dVertices.begin(), m_dVertices.end(), Vec3Less());
auto newEnd = thrust::unique(m_dVertices.begin(), m_dVertices.end(), Vec3Equals());
m_dVertices.erase(newEnd, m_dVertices.end());
// Map every original corner vertex to its slot in the deduplicated list
device_vector<int> dIndexMapping(dOrigVertices.size());
thrust::lower_bound(
thrust::device,
m_dVertices.begin(), m_dVertices.end(),
dOrigVertices.begin(), dOrigVertices.end(),
dIndexMapping.begin(),
Vec3Less());
// Expand each quad into two triangles (6 indices per face)
device_vector<int> dIndices(faceCount * 6);
groupCount = NextMultipleOf<int, WorkGroupSize>(faceCount) / WorkGroupSize;
hipLaunchKernelGGL(( kernelCreateTriangleIndices), dim3(groupCount), dim3(WorkGroupSize), 0, 0,
dIndexMapping.data().get(),
faceCount,
dIndices.data().get());
ThrowIfFailed(hipGetLastError()); // surface launch-config errors immediately
// Copy indices back to ram (the blocking hipMemcpy synchronizes with the
// launch above). Vertices are still being modified!
m_outIndices.resize(dIndices.size());
ThrowIfFailed(hipMemcpy(m_outIndices.data(), dIndices.data().get(), dIndices.size() * sizeof(int), hipMemcpyDeviceToHost));
}
// Pulls each deduplicated vertex back toward the original surface: for every
// vertex cSampleCount rays are cast (kernelSampleClosestDistances) and the
// nearest hit within ~one voxel diagonal replaces the vertex position
// (kernelFilterClosestDistance). Results land in m_outVertices batch by batch.
void CudaMesher::MinimizePositionError()
{
const uint32_t cSampleCount = 256;
auto& nodes = m_tree.GetTreeNodes();
auto vCount = (int)m_dVertices.size();
// Per-batch scratch: one (point, distance) candidate per vertex sample
auto bufferSize = ::min(vCount, MaxVertexBatchSize) * cSampleCount;
thrust::device_vector<SparseOctree::Node> dTree(nodes.size());
thrust::device_vector<vec3> dClosestPositions(bufferSize);
thrust::device_vector<float> dClosestDistances(bufferSize);
// Resize output vertex array
m_outVertices.resize(vCount);
// Copy tree to vram
thrust::copy(nodes.data(), nodes.data() + nodes.size(), dTree.begin());
// Go through batches
int startIdx = 0;
while (startIdx < vCount)
{
// Evaluate batch size
auto batchSize = ::min(MaxVertexBatchSize, (int)vCount - startIdx);
// Block layout {2, 256}: x spans vertices, y spans samples (512 threads).
// blockDim.y must equal cSampleCount -- the sampling kernel indexes by it.
const auto xDimSize = 2u;
dim3 groupSize = { xDimSize, cSampleCount, 1 };
auto groupCount = NextMultipleOf<int, xDimSize>(batchSize) / xDimSize;
// Launch kernel to compute closest distances
hipLaunchKernelGGL(( kernelSampleClosestDistances), dim3(groupCount), dim3(groupSize), 0, 0,
dTree.data().get(),
m_tree.GetBounds(),
m_dVertices.data().get() + startIdx,
batchSize,
cSampleCount,
m_voxelSize,
m_voxelSize * 1.73f, // max sampling distance: ~sqrt(3) * voxel size (voxel diagonal)
dClosestPositions.data().get(),
dClosestDistances.data().get());
ThrowIfFailed(hipGetLastError());
// Launch a second kernel to get minimum distance from candidates
groupCount = NextMultipleOf<int, WorkGroupSize>(batchSize) / WorkGroupSize;
hipLaunchKernelGGL(( kernelFilterClosestDistance), dim3(groupCount), dim3(WorkGroupSize), 0, 0,
dClosestPositions.data().get(),
dClosestDistances.data().get(),
batchSize,
cSampleCount,
m_dVertices.data().get() + startIdx);
ThrowIfFailed(hipGetLastError());
ThrowIfFailed(hipDeviceSynchronize());
// Finally copy vertices back to ram
thrust::copy(m_dVertices.begin() + startIdx, m_dVertices.begin() + startIdx + batchSize, m_outVertices.data() + startIdx);
startIdx += batchSize;
}
}
// Mesher
// Thin public facade: stores the inputs/outputs and forwards the actual
// work to the GPU implementation (CudaMesher).
Mesher::Mesher(const SparseOctree& tree, const vector<Voxel>& voxels, vector<vec3>& outVertices, vector<int>& outIndices)
: m_tree(tree)
, m_voxels(voxels)
, m_outVertices(outVertices)
, m_outIndices(outIndices)
{ }
// Meshifies at targetDepth; valid range is (0, 21] (enforced by the assert,
// matching the 21-level Morton key used by the voxel encoding).
void Mesher::Meshify(int targetDepth)
{
assert(targetDepth <= 21 && targetDepth > 0);
// Pass call forward to cuda mesher
CudaMesher mesher(m_tree, m_voxels, m_outVertices, m_outIndices);
mesher.Meshify(targetDepth);
}
}
} | 72be7eddc8dc25618953a6b484c3546a8de78349.cu | #include "Mesher.hpp"
#include "SparseOctree.hpp"
#include "Intersection.cuh"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#define WorkGroupSize (256)
#define MaxMesherBatchSize (WorkGroupSize * 40000ULL)
#define MaxVertexBatchSize (50000)
#define EpsTreshold (0.0001f)
#define xBitPattern (0x249249249249249ULL) // only 20 levels
#define yBitPattern (xBitPattern << 1)
#define zBitPattern (xBitPattern << 2)
#define xFillPattern (~xBitPattern)
#define yFillPattern (~yBitPattern)
#define zFillPattern (~zBitPattern)
#define Voxel_xPlus(v) ((((v | xFillPattern) + 0b001) & xBitPattern) | (v & yBitPattern) | (v & zBitPattern))
#define Voxel_xMinus(v) ((((v & xBitPattern) - 0b001) & xBitPattern) | (v & yBitPattern) | (v & zBitPattern))
#define Voxel_yPlus(v) ((((v | yFillPattern) + 0b010) & yBitPattern) | (v & xBitPattern) | (v & zBitPattern))
#define Voxel_yMinus(v) ((((v & yBitPattern) - 0b010) & yBitPattern) | (v & xBitPattern) | (v & zBitPattern))
#define Voxel_zPlus(v) ((((v | zFillPattern) + 0b100) & zBitPattern) | (v & xBitPattern) | (v & yBitPattern))
#define Voxel_zMinus(v) ((((v & zBitPattern) - 0b100) & zBitPattern) | (v & xBitPattern) | (v & yBitPattern))
using namespace glm;
using namespace std;
using namespace thrust;
using namespace UltraLod;
struct Vec3Less // Doesn't work if this is in anonymous struct
{
__device__ __host__
inline bool operator()(const vec3& v0, const vec3& v1) const
{
auto xd = v0.x - v1.x;
auto yd = v0.y - v1.y;
auto zd = v0.z - v1.z;
if (xd < -EpsTreshold) return true;
else if (xd > EpsTreshold) return false;
if (yd < -EpsTreshold) return true;
else if (yd > EpsTreshold) return false;
if (zd < -EpsTreshold) return true;
else if (zd > EpsTreshold) return false;
return false;
}
};
struct Vec3Equals
{
__device__ __host__
inline bool operator()(const vec3& v0, const vec3& v1) const
{
return
fabsf(v0.x - v1.x) <= EpsTreshold &&
fabsf(v0.y - v1.y) <= EpsTreshold &&
fabsf(v0.z - v1.z) <= EpsTreshold;
//v0.x == v1.x &&
//v0.y == v1.y &&
//v0.z == v1.z;
}
};
namespace
{
struct FaceCountConversion
{
__device__
inline int operator()(uint8_t face) const
{
return __popc((uint32_t)face);
}
};
struct ShrinkToDepth
{
public:
ShrinkToDepth(int depthReduce)
: m_depthReduce(depthReduce)
{ }
__device__ __host__
inline Voxel operator()(const Voxel& voxel) const
{
// Strip flag bit
Voxel stripVoxel = voxel & ~(1ULL << 63);
return stripVoxel >> (m_depthReduce * 3);
}
private:
int m_depthReduce;
};
// Binary search search
__device__ __host__
inline bool SolidVoxel(const Voxel* data, int len, const Voxel& voxel)
{
int lo = 0;
int hi = len - 1;
while (lo <= hi)
{
int mid = (lo + hi) / 2;
if (data[mid] > voxel)
hi = mid - 1;
else if (data[mid] < voxel)
lo = mid + 1;
else
return true;
}
return false;
}
__device__ __host__
inline Aabb GetVoxelBounds(const Voxel& voxel, const Aabb& rootBounds, float voxelSize)
{
// De-interleave bits
uint64_t x = 0;
uint64_t y = 0;
uint64_t z = 0;
for (int i = 0; i < 21; i++)
{
x |= (voxel >> (2 * i + 0)) & (1ULL << i);
y |= (voxel >> (2 * i + 1)) & (1ULL << i);
z |= (voxel >> (2 * i + 2)) & (1ULL << i);
}
vec3 min = vec3((int)x, (int)y, (int)z) * voxelSize;
vec3 max = min + vec3(voxelSize, voxelSize, voxelSize);
min += rootBounds.min;
max += rootBounds.min;
return { min, max };
}
__global__
void kernelCreateAdjacencyInfo(const Voxel* voxels, int count, uint8_t* outAdjacency)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (!(idx < count))
return;
auto& voxel = voxels[idx];
// Skip this if not solid (shouldn't be possible though)
if (!SolidVoxel(voxels, count, voxel))
return;
uint8_t adjacency = 0;
// Check each side for solid voxels
adjacency |= (!SolidVoxel(voxels, count, Voxel_xMinus(voxel)) << 0);
adjacency |= (!SolidVoxel(voxels, count, Voxel_xPlus(voxel)) << 1);
adjacency |= (!SolidVoxel(voxels, count, Voxel_yMinus(voxel)) << 2);
adjacency |= (!SolidVoxel(voxels, count, Voxel_yPlus(voxel)) << 3);
adjacency |= (!SolidVoxel(voxels, count, Voxel_zMinus(voxel)) << 4);
adjacency |= (!SolidVoxel(voxels, count, Voxel_zPlus(voxel)) << 5);
outAdjacency[idx] = adjacency;
}
__global__
void kernelCreateTriangleIndices(const int* vertexMap, int faceCount, int* outIndices)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (!(idx < faceCount))
return;
auto inIdx = idx * 4;
auto outIdx = idx * 6;
outIndices[outIdx++] = vertexMap[inIdx + 0];
outIndices[outIdx++] = vertexMap[inIdx + 2];
outIndices[outIdx++] = vertexMap[inIdx + 1];
outIndices[outIdx++] = vertexMap[inIdx + 2];
outIndices[outIdx++] = vertexMap[inIdx + 0];
outIndices[outIdx++] = vertexMap[inIdx + 3];
}
__global__
void kernelSampleClosestDistances(
const SparseOctree::Node* nodes,
Aabb treeBounds,
const vec3* vertices,
int vertexCount,
int sampleCount,
float voxelSize,
float sampleDist,
vec3* outClosestPoints,
float* outClosestDistances)
{
int vIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (!(vIdx < vertexCount))
return;
int sIdx = threadIdx.y;
int outIdx = vIdx * sampleCount + sIdx;
// Get vertex to test
auto& vertex = vertices[vIdx];
// Use halton sequence to determine ray direction
// TODO: This should be uniform distribution over unit sphere!
vec3 dir =
{
Halton<3>(sIdx) * 2.0f - 1.0f,
Halton<7>(sIdx) * 2.0f - 1.0f,
Halton<11>(sIdx) * 2.0f - 1.0f
};
dir = Normalize(dir);
// Shoot the ray!
ColorRGB24 outColor; // dummy
float tMin = 0.0f;
float tMax = sampleDist;
NodeRayCast(nodes, nullptr, vertex, dir, tMin, tMax, outColor, treeBounds);
// Store result if collision happened
if (tMax < sampleDist)
{
auto np = vertex + dir * tMax;
// Make sure vertex is not moved too much
np.x = __min(__max(np.x, vertex.x - voxelSize), vertex.x + voxelSize);
np.y = __min(__max(np.y, vertex.y - voxelSize), vertex.y + voxelSize);
np.z = __min(__max(np.z, vertex.z - voxelSize), vertex.z + voxelSize);
outClosestPoints[outIdx] = np;
outClosestDistances[outIdx] = tMax;
}
else
outClosestDistances[outIdx] = FLT_MAX;
}
__global__
void kernelFilterClosestDistance(const vec3* closestPoints, const float* closestDistances, int vertexCount, int sampleCount, vec3* outPoints)
{
int vIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = vIdx * sampleCount;
if (!(vIdx < vertexCount))
return;
float closestDistance = FLT_MAX;
vec3 closestPoint = vec3(0, 0, 0);
// Go through points
for (int i = 0; i < sampleCount; i++)
{
auto& dist = closestDistances[sIdx + i];
if (dist < closestDistance)
{
closestDistance = dist;
// Store closest point candidate
closestPoint = closestPoints[sIdx + i];
}
}
if (closestDistance < FLT_MAX)
outPoints[vIdx] = closestPoint;
}
__global__
void kernelTriangulate(const Voxel* voxels, const uint8_t* adjacency, const int* faceIndices, int count, Aabb bounds, float voxelSize, vec3* outVertices)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (!(idx < count))
return;
// Get voxel information
auto& voxel = voxels[idx];
auto startIdx = faceIndices[idx];
auto faces = adjacency[idx];
auto faceCount = __popc(faces);
auto vertIdx = startIdx * 4;
if (!faceCount)
return;
// Evaluate voxel bounds
auto voxelBounds = GetVoxelBounds(voxel, bounds, voxelSize);
auto& min = voxelBounds.min;
auto& vs = voxelSize;
#define AppendTri() AppendIdx(0); AppendIdx(1); AppendIdx(2); AppendIdx(2); AppendIdx(3); AppendIdx(0);
#define AppendVert(x, y, z) (outVertices[vertIdx++] = min + vec3(x, y, z))
// Go through each face
if (faces & 0b000001) // xm
{
AppendVert(0, 0, vs);
AppendVert(0, 0, 0);
AppendVert(0, vs, 0);
AppendVert(0, vs, vs);
}
if (faces & 0b000010) // xp
{
AppendVert(vs, 0, 0);
AppendVert(vs, 0, vs);
AppendVert(vs, vs, vs);
AppendVert(vs, vs, 0);
}
if (faces & 0b000100) // ym
{
AppendVert(0, 0, vs);
AppendVert(vs, 0, vs);
AppendVert(vs, 0, 0);
AppendVert(0, 0, 0);
}
if (faces & 0b001000) // yp
{
AppendVert(0, vs, 0);
AppendVert(vs, vs, 0);
AppendVert(vs, vs, vs);
AppendVert(0, vs, vs);
}
if (faces & 0b010000) // zm
{
AppendVert(0, 0, 0);
AppendVert(vs, 0, 0);
AppendVert(vs, vs, 0);
AppendVert(0, vs, 0);
}
if (faces & 0b100000) // zp
{
AppendVert(vs, 0, vs);
AppendVert(0, 0, vs);
AppendVert(0, vs, vs);
AppendVert(vs, vs, vs);
}
}
#undef AppendVert
#undef AppendTri
#undef AppendIdx
}
namespace UltraLod
{
// CudaMesher
class CudaMesher
{
public:
CudaMesher(const SparseOctree& tree, const vector<Voxel>& voxels, vector<vec3>& outVertices, vector<int>& outIndices);
void Meshify(int treeDepth);
private:
void GenerateAdjacencyInfo();
void GenerateSolidVoxels(int targetDepth);
void GenerateVoxelFaceGeometry(int depth);
void MinimizePositionError();
//void GenerateSolidVoxels(int targetDepth, int startDepth, Voxel parentVoxel);
private:
const SparseOctree& m_tree;
const vector<Voxel>& m_voxels;
device_vector<Voxel> m_dSolidVoxels;
device_vector<uint8_t> m_dAdjacentVoxels;
device_vector<vec3> m_dVertices;
vector<Voxel> m_solidVoxels;
float m_voxelSize;
vector<vec3>& m_outVertices;
vector<int>& m_outIndices;
};
CudaMesher::CudaMesher(const SparseOctree& tree, const vector<Voxel>& voxels, vector<vec3>& outVertices, vector<int>& outIndices)
: m_tree(tree)
, m_voxels(voxels)
, m_outVertices(outVertices)
, m_outIndices(outIndices)
{ }
void CudaMesher::Meshify(int targetDepth)
{
// Validate depth
auto treeDepth = m_tree.GetDepth();
if (targetDepth >= treeDepth)
return;
// Generate solid voxels
GenerateSolidVoxels(targetDepth);
if (m_solidVoxels.size() == 0)
return;
// Generate adjacency information (check all sides for solid voxels)
GenerateAdjacencyInfo();
// Generate voxel face geometry
GenerateVoxelFaceGeometry(targetDepth);
// Finally try to minimize error of generated geometry by moving them closer to original surface
MinimizePositionError();
}
void CudaMesher::GenerateAdjacencyInfo()
{
auto voxelCount = (int)m_solidVoxels.size();
assert(voxelCount > 0);
// Instantiate voxel bit flags
m_dAdjacentVoxels.resize(voxelCount);
thrust::fill(m_dAdjacentVoxels.begin(), m_dAdjacentVoxels.end(), 0);
// Load voxels to gpu (although should alread be there)
m_dSolidVoxels.resize(voxelCount);
ThrowIfFailed(cudaMemcpy(m_dSolidVoxels.data().get(), m_solidVoxels.data(), voxelCount * sizeof(Voxel), cudaMemcpyHostToDevice));
// Launch the kernel!
auto workCount = NextMultipleOf<int, WorkGroupSize>(voxelCount);
auto groupCount = workCount / WorkGroupSize;
kernelCreateAdjacencyInfo<<<(int)groupCount, WorkGroupSize>>>(
m_dSolidVoxels.data().get(),
voxelCount,
m_dAdjacentVoxels.data().get());
ThrowIfFailed(cudaGetLastError());
ThrowIfFailed(cudaDeviceSynchronize());
}
void CudaMesher::GenerateSolidVoxels(int targetDepth)
{
auto depthDiff = m_tree.GetDepth() - targetDepth;
assert(m_tree.GetDepth() > targetDepth);
// Evalute voxel size of generated mesh
m_voxelSize = m_tree.GetBounds().Size().x / (1 << targetDepth);
// Init gpu variables
m_dSolidVoxels.resize(std::min(m_voxels.size(), MaxMesherBatchSize));
// Go through voxels as batches
uint64_t startIdx = 0;
int batchCount = 0;
while (startIdx < m_voxels.size())
{
// Evaluate batch size
auto batchSize = std::min(MaxMesherBatchSize, m_voxels.size() - startIdx);
// Copy voxels of this batch to gpu
ThrowIfFailed(cudaMemcpy(m_dSolidVoxels.data().get(), m_voxels.data() + startIdx, batchSize * sizeof(Voxel), cudaMemcpyHostToDevice));
// Shrink them to the target depth
thrust::transform(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + batchSize, m_dSolidVoxels.begin(), ShrinkToDepth(depthDiff));
// Get unique only
thrust::stable_sort(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + batchSize);
auto newEnd = thrust::unique(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + batchSize);
// Copy data back to cpu
auto solidVoxelCount = newEnd - m_dSolidVoxels.begin();
auto prevCount = m_solidVoxels.size();
m_solidVoxels.resize(m_solidVoxels.size() + solidVoxelCount);
ThrowIfFailed(cudaMemcpy(m_solidVoxels.data() + prevCount, m_dSolidVoxels.data().get(), solidVoxelCount * sizeof(Voxel), cudaMemcpyDeviceToHost));
// Update start index for the next batch
startIdx += batchSize;
batchCount++;
}
if (batchCount > 1)
{
// Load all back to gpu for final sort
assert(m_solidVoxels.size() <= MaxMesherBatchSize);
auto solidVoxelCount = std::min(m_solidVoxels.size(), MaxMesherBatchSize);
if (m_dSolidVoxels.size() < solidVoxelCount)
m_dSolidVoxels.resize(solidVoxelCount);
ThrowIfFailed(cudaMemcpy(m_dSolidVoxels.data().get(), m_solidVoxels.data(), solidVoxelCount * sizeof(Voxel), cudaMemcpyHostToDevice));
// Get unique solid voxels
thrust::stable_sort(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + solidVoxelCount);
auto newEnd = thrust::unique(m_dSolidVoxels.begin(), m_dSolidVoxels.begin() + solidVoxelCount);
// Copy data back to cpu
solidVoxelCount = newEnd - m_dSolidVoxels.begin();
m_solidVoxels.resize(solidVoxelCount);
ThrowIfFailed(cudaMemcpy(m_solidVoxels.data(), m_dSolidVoxels.data().get(), solidVoxelCount * sizeof(Voxel), cudaMemcpyDeviceToHost));
}
}
// Builds quad geometry for every exposed voxel face, deduplicates shared
// corner vertices on the GPU and writes the triangle index buffer into
// m_outIndices. Vertices stay resident in m_dVertices so that
// MinimizePositionError() can keep refining them in vram.
void CudaMesher::GenerateVoxelFaceGeometry(int depth)
{
auto voxelCount = m_solidVoxels.size();
assert(voxelCount > 0);
assert(voxelCount == m_dAdjacentVoxels.size());
// Total face count = sum of popcounts of the per-voxel adjacency masks
auto faceCount = thrust::transform_reduce(m_dAdjacentVoxels.begin(), m_dAdjacentVoxels.begin() + voxelCount, FaceCountConversion(), 0, thrust::plus<int>());
// Exclusive prefix sum of per-voxel face counts -> each voxel's first face slot.
// BUGFIX: this scratch buffer receives one entry per *voxel*, so it must be
// sized voxelCount; it was previously sized faceCount, which writes out of
// bounds whenever faceCount < voxelCount (e.g. many fully enclosed voxels).
device_vector<int> voxelFaceStartIndices(voxelCount);
thrust::transform(m_dAdjacentVoxels.begin(), m_dAdjacentVoxels.end(), voxelFaceStartIndices.begin(), FaceCountConversion());
thrust::exclusive_scan(voxelFaceStartIndices.begin(), voxelFaceStartIndices.begin() + voxelCount, voxelFaceStartIndices.begin(), 0, thrust::plus<int>());
// Allocate memory for output geometry (4 corner vertices per quad face)
device_vector<vec3> dOrigVertices(faceCount * 4);
// Triangulate faces!
auto groupCount = (int)(NextMultipleOf<uint64_t, WorkGroupSize>(voxelCount) / WorkGroupSize);
auto& bounds = m_tree.GetBounds();
auto voxelSize = bounds.Size().x / (1 << depth);
kernelTriangulate<<<groupCount, WorkGroupSize>>>(
m_dSolidVoxels.data().get(),
m_dAdjacentVoxels.data().get(),
voxelFaceStartIndices.data().get(),
(int)voxelCount,
m_tree.GetBounds(),
voxelSize,
dOrigVertices.data().get());
ThrowIfFailed(cudaGetLastError());
ThrowIfFailed(cudaDeviceSynchronize());
// Deduplicate vertices: sort gathers equal positions together, unique drops
// the copies (Vec3Less / Vec3Equals compare with an epsilon tolerance)
m_dVertices = dOrigVertices;
thrust::sort(m_dVertices.begin(), m_dVertices.end(), Vec3Less());
auto newEnd = thrust::unique(m_dVertices.begin(), m_dVertices.end(), Vec3Equals());
m_dVertices.erase(newEnd, m_dVertices.end());
// Map every original corner vertex to its slot in the deduplicated list
device_vector<int> dIndexMapping(dOrigVertices.size());
thrust::lower_bound(
thrust::device,
m_dVertices.begin(), m_dVertices.end(),
dOrigVertices.begin(), dOrigVertices.end(),
dIndexMapping.begin(),
Vec3Less());
// Expand each quad into two triangles (6 indices per face)
device_vector<int> dIndices(faceCount * 6);
groupCount = NextMultipleOf<int, WorkGroupSize>(faceCount) / WorkGroupSize;
kernelCreateTriangleIndices<<<groupCount, WorkGroupSize>>>(
dIndexMapping.data().get(),
faceCount,
dIndices.data().get());
ThrowIfFailed(cudaGetLastError()); // surface launch-config errors immediately
// Copy indices back to ram (the blocking cudaMemcpy synchronizes with the
// launch above). Vertices are still being modified!
m_outIndices.resize(dIndices.size());
ThrowIfFailed(cudaMemcpy(m_outIndices.data(), dIndices.data().get(), dIndices.size() * sizeof(int), cudaMemcpyDeviceToHost));
}
void CudaMesher::MinimizePositionError()
{
const uint32_t cSampleCount = 256;
auto& nodes = m_tree.GetTreeNodes();
auto vCount = (int)m_dVertices.size();
auto bufferSize = std::min(vCount, MaxVertexBatchSize) * cSampleCount;
thrust::device_vector<SparseOctree::Node> dTree(nodes.size());
thrust::device_vector<vec3> dClosestPositions(bufferSize);
thrust::device_vector<float> dClosestDistances(bufferSize);
// Resize output vertex array
m_outVertices.resize(vCount);
// Copy tree to vram
thrust::copy(nodes.data(), nodes.data() + nodes.size(), dTree.begin());
// Go through batches
int startIdx = 0;
while (startIdx < vCount)
{
// Evaluate batch size
auto batchSize = std::min(MaxVertexBatchSize, (int)vCount - startIdx);
const auto xDimSize = 2u;
dim3 groupSize = { xDimSize, cSampleCount, 1 };
auto groupCount = NextMultipleOf<int, xDimSize>(batchSize) / xDimSize;
// Launch kernel to compute closest distances
kernelSampleClosestDistances<<<groupCount, groupSize>>>(
dTree.data().get(),
m_tree.GetBounds(),
m_dVertices.data().get() + startIdx,
batchSize,
cSampleCount,
m_voxelSize,
m_voxelSize * 1.73f, // Use voxel size as max sampling distance
dClosestPositions.data().get(),
dClosestDistances.data().get());
ThrowIfFailed(cudaGetLastError());
// Launch a second kernel to get minimum distance from candidates
groupCount = NextMultipleOf<int, WorkGroupSize>(batchSize) / WorkGroupSize;
kernelFilterClosestDistance<<<groupCount, WorkGroupSize>>>(
dClosestPositions.data().get(),
dClosestDistances.data().get(),
batchSize,
cSampleCount,
m_dVertices.data().get() + startIdx);
ThrowIfFailed(cudaGetLastError());
ThrowIfFailed(cudaDeviceSynchronize());
// Finally copy vertices back to ram
thrust::copy(m_dVertices.begin() + startIdx, m_dVertices.begin() + startIdx + batchSize, m_outVertices.data() + startIdx);
startIdx += batchSize;
}
}
// Mesher
Mesher::Mesher(const SparseOctree& tree, const vector<Voxel>& voxels, vector<vec3>& outVertices, vector<int>& outIndices)
: m_tree(tree)
, m_voxels(voxels)
, m_outVertices(outVertices)
, m_outIndices(outIndices)
{ }
void Mesher::Meshify(int targetDepth)
{
assert(targetDepth <= 21 && targetDepth > 0);
// Pass call forward to cuda mesher
CudaMesher mesher(m_tree, m_voxels, m_outVertices, m_outIndices);
mesher.Meshify(targetDepth);
}
} |
1fef2f23d073e4b3a801816a48d36f73f4e1180a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "rocblas.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define m 5
#define n 6
#define ku 2
#define kl 1
int main(void){
// Demo of banded matrix-vector multiply y = al*A*x + bet*y with
// hipblasSgbmv, where A is m x n with kl sub- and ku super-diagonals.
hipblasHandle_t handle;
int i,j;
float* a; // banded A, column-major band storage, lda = m
float* x; // input vector, length n
float* y; // output vector; only the first m entries are used
// NOTE(review): return codes of the hipMallocManaged / hipblas calls below
// are not checked; a failure would go unnoticed.
hipMallocManaged(&a,m*n*sizeof(float));
hipMallocManaged(&x,n*sizeof(float));
// NOTE(review): y is allocated with n floats but Sgbmv writes only m of
// them (m < n here), so this merely over-allocates.
hipMallocManaged(&y,n*sizeof(float));
// Fill the band rows with consecutive values 11, 12, ...:
// band row 0 = ku-th (2nd) superdiagonal, row 1 = 1st superdiagonal,
// row ku = main diagonal, row ku+1 = 1st subdiagonal.
int ind=11;
for(i = ku; i < n; i++) a[IDX2C(0,i,m)]=(float)ind++;
for(i = ku-1; i < n; i++) a[IDX2C(1,i,m)]=(float)ind++;
for(i = 0; i < n-1; i++) a[IDX2C(ku,i,m)]=(float)ind++;
for(i = 0; i < n-2; i++) a[IDX2C(ku+1,i,m)]=(float)ind++;
for(i = 0; i < n; i++) x[i] = 1.0f; // x = ones
for(i = 0; i < m; i++) y[i] = 0.0f; // y = zeros
hipblasCreate(&handle);
float al = 1.0f; // alpha
float bet = 1.0f; // beta
// y = al * A * x + bet * y  (no transpose; lda = m)
hipblasSgbmv(handle, HIPBLAS_OP_N, m, n, kl, ku, &al, a, m, x, 1, &bet, y, 1);
// Managed memory: synchronize before the host reads y
hipDeviceSynchronize();
printf("y after Sgbmv:\n");
for(j = 0; j < m; j++){
printf("%7.0f", y[j]);
printf("\n");
}
hipFree(a);
hipFree(x);
hipFree(y);
hipblasDestroy(handle);
return EXIT_SUCCESS;
}
| 1fef2f23d073e4b3a801816a48d36f73f4e1180a.cu | #include <stdio.h>
#include "cublas_v2.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define m 5
#define n 6
#define ku 2
#define kl 1
int main(void){
cublasHandle_t handle;
int i,j;
float* a;
float* x;
float* y;
cudaMallocManaged(&a,m*n*sizeof(float));
cudaMallocManaged(&x,n*sizeof(float));
cudaMallocManaged(&y,n*sizeof(float));
int ind=11;
for(i = ku; i < n; i++) a[IDX2C(0,i,m)]=(float)ind++;
for(i = ku-1; i < n; i++) a[IDX2C(1,i,m)]=(float)ind++;
for(i = 0; i < n-1; i++) a[IDX2C(ku,i,m)]=(float)ind++;
for(i = 0; i < n-2; i++) a[IDX2C(ku+1,i,m)]=(float)ind++;
for(i = 0; i < n; i++) x[i] = 1.0f;
for(i = 0; i < m; i++) y[i] = 0.0f;
cublasCreate(&handle);
float al = 1.0f;
float bet = 1.0f;
cublasSgbmv(handle, CUBLAS_OP_N, m, n, kl, ku, &al, a, m, x, 1, &bet, y, 1);
cudaDeviceSynchronize();
printf("y after Sgbmv:\n");
for(j = 0; j < m; j++){
printf("%7.0f", y[j]);
printf("\n");
}
cudaFree(a);
cudaFree(x);
cudaFree(y);
cublasDestroy(handle);
return EXIT_SUCCESS;
}
|
f786aa0d6139e7a69d2198bad87277b6bcafb6d6.hip | // !!! This is a file automatically generated by hipify!!!
/*//------------------------------------------------------------------------------------------------------------
| TEST delle funzioni di base della classe (funzionalita'):
| inizializzazione della matrice
| print
| inizializzazione Random
| resize
*///------------------------------------------------------------------------------------------------------------
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#include "Src/Ausiliary/CudaCrono.cuh"
#include "Src/Cuda_FloatMatrixClass.cuh"
// Functional test of the `matrice` class: default init, Hilbert init,
// random init, and resize+re-init, each run once on the GPU and once on
// the CPU. hip events bracket each run (T3/T4 for GPU paths, T1/T2 for
// CPU paths) and the elapsed time is printed in milliseconds.
int main(void){
    int n1=5;
    int n2=5;
    //cout<<" #righe = ";cin>>n1;cout<<endl;
    //cout<<" #colonne = ";cin>>n2;cout<<endl;
    // Events used for the timing statistics
    hipEvent_t T1, T2, T3, T4;
    hipEventCreate(&T1);
    hipEventCreate(&T2);
    hipEventCreate(&T3);
    hipEventCreate(&T4);
    float diff_time;   // elapsed ms reported by hipEventElapsedTime
    matrice matA (n1,n2);
    // --- default initialisation ---
    cout<<"matrice iniziale con GPU"<<endl;
    hipEventRecord(T3,0);
    matA.Gpu_Init();
    matA.sync_DeviceToHost();
    hipEventRecord(T4,0);
    hipEventSynchronize(T4);
    hipEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"matrice iniziale con CPU"<<endl;
    hipEventRecord(T1,0);
    matA.Cpu_Init();
    matA.sync_HostToDevice();
    hipEventRecord(T2,0);
    hipEventSynchronize(T2);
    hipEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    // --- Hilbert matrix initialisation ---
    cout<<"matrice Hilbert con GPU"<<endl;
    hipEventRecord(T3,0);
    matA.Gpu_Hilbert_Init();
    matA.sync_DeviceToHost();
    hipEventRecord(T4,0);
    hipEventSynchronize(T4);
    hipEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"matrice Hilbert con CPU"<<endl;
    hipEventRecord(T1,0);
    matA.Cpu_Hilbert_Init();
    matA.sync_HostToDevice();
    hipEventRecord(T2,0);
    hipEventSynchronize(T2);
    hipEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    // --- random initialisation in [0.95, 1.05] ---
    cout<<"matrice iniziale random con GPU"<<endl;
    hipEventRecord(T3,0);
    matA.Gpu_Init_Rand(0.95,1.05);
    matA.sync_DeviceToHost();
    hipEventRecord(T4,0);
    hipEventSynchronize(T4);
    hipEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"matrice iniziale random con CPU"<<endl;
    hipEventRecord(T1,0);
    matA.Cpu_Init_Rand(0.95,1.05);
    matA.sync_HostToDevice();
    hipEventRecord(T2,0);
    hipEventSynchronize(T2);
    hipEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    // --- resize to 10x10 plus re-randomisation ---
    cout<<"Resize e Rinit con GPU"<<endl;
    hipEventRecord(T3,0);
    matA.Resize(10,10);
    matA.Gpu_Init_Rand(0.95,1.05);
    matA.sync_DeviceToHost();
    hipEventRecord(T4,0);
    hipEventSynchronize(T4);
    hipEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"Resize con CPU"<<endl;
    hipEventRecord(T1,0);
    matA.Resize(10,10);
    matA.Cpu_Init_Rand(0.95,1.05);
    matA.sync_HostToDevice();
    hipEventRecord(T2,0);
    hipEventSynchronize(T2);
    hipEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    cout<<"Commenti:\nDa questa implementazione banale dell'inizializzazione si scopre che il hiprand della gpu non e' particolarmente efficiente, il motivo e' imputabile alla non ideale ottimizzazione del numero di thread scelto per la chiamata del kernel."<<endl;
    hipEventDestroy(T1);
    hipEventDestroy(T2);
    hipEventDestroy(T3);
    hipEventDestroy(T4);
    return 0;
}
| f786aa0d6139e7a69d2198bad87277b6bcafb6d6.cu | /*//------------------------------------------------------------------------------------------------------------
| TEST delle funzioni di base della classe (funzionalita'):
| inizializzazione della matrice
| print
| inizializzazione Random
| resize
*///------------------------------------------------------------------------------------------------------------
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#include "Src/Ausiliary/CudaCrono.cuh"
#include "Src/Cuda_FloatMatrixClass.cuh"
// Functional test of the `matrice` class: default init, Hilbert init,
// random init, and resize+re-init, each run once on the GPU and once on
// the CPU. CUDA events bracket each run (T3/T4 for GPU paths, T1/T2 for
// CPU paths) and the elapsed time is printed in milliseconds.
int main(void){
    int n1=5;
    int n2=5;
    //cout<<" #righe = ";cin>>n1;cout<<endl;
    //cout<<" #colonne = ";cin>>n2;cout<<endl;
    // Events used for the timing statistics
    cudaEvent_t T1, T2, T3, T4;
    cudaEventCreate(&T1);
    cudaEventCreate(&T2);
    cudaEventCreate(&T3);
    cudaEventCreate(&T4);
    float diff_time;   // elapsed ms reported by cudaEventElapsedTime
    matrice matA (n1,n2);
    // --- default initialisation ---
    cout<<"matrice iniziale con GPU"<<endl;
    cudaEventRecord(T3,0);
    matA.Gpu_Init();
    matA.sync_DeviceToHost();
    cudaEventRecord(T4,0);
    cudaEventSynchronize(T4);
    cudaEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"matrice iniziale con CPU"<<endl;
    cudaEventRecord(T1,0);
    matA.Cpu_Init();
    matA.sync_HostToDevice();
    cudaEventRecord(T2,0);
    cudaEventSynchronize(T2);
    cudaEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    // --- Hilbert matrix initialisation ---
    cout<<"matrice Hilbert con GPU"<<endl;
    cudaEventRecord(T3,0);
    matA.Gpu_Hilbert_Init();
    matA.sync_DeviceToHost();
    cudaEventRecord(T4,0);
    cudaEventSynchronize(T4);
    cudaEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"matrice Hilbert con CPU"<<endl;
    cudaEventRecord(T1,0);
    matA.Cpu_Hilbert_Init();
    matA.sync_HostToDevice();
    cudaEventRecord(T2,0);
    cudaEventSynchronize(T2);
    cudaEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    // --- random initialisation in [0.95, 1.05] ---
    cout<<"matrice iniziale random con GPU"<<endl;
    cudaEventRecord(T3,0);
    matA.Gpu_Init_Rand(0.95,1.05);
    matA.sync_DeviceToHost();
    cudaEventRecord(T4,0);
    cudaEventSynchronize(T4);
    cudaEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"matrice iniziale random con CPU"<<endl;
    cudaEventRecord(T1,0);
    matA.Cpu_Init_Rand(0.95,1.05);
    matA.sync_HostToDevice();
    cudaEventRecord(T2,0);
    cudaEventSynchronize(T2);
    cudaEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    // --- resize to 10x10 plus re-randomisation ---
    cout<<"Resize e Rinit con GPU"<<endl;
    cudaEventRecord(T3,0);
    matA.Resize(10,10);
    matA.Gpu_Init_Rand(0.95,1.05);
    matA.sync_DeviceToHost();
    cudaEventRecord(T4,0);
    cudaEventSynchronize(T4);
    cudaEventElapsedTime(&diff_time,T3,T4);
    cout << "tempo=" << diff_time<<"\n";
    matA.Gpu_print();
    cout<<"Resize con CPU"<<endl;
    cudaEventRecord(T1,0);
    matA.Resize(10,10);
    matA.Cpu_Init_Rand(0.95,1.05);
    matA.sync_HostToDevice();
    cudaEventRecord(T2,0);
    cudaEventSynchronize(T2);
    cudaEventElapsedTime(&diff_time,T1,T2);
    cout << "tempo=" << diff_time<<"\n";
    matA.Cpu_print();
    cout<<"Commenti:\nDa questa implementazione banale dell'inizializzazione si scopre che il curand della gpu non e' particolarmente efficiente, il motivo e' imputabile alla non ideale ottimizzazione del numero di thread scelto per la chiamata del kernel."<<endl;
    cudaEventDestroy(T1);
    cudaEventDestroy(T2);
    cudaEventDestroy(T3);
    cudaEventDestroy(T4);
    return 0;
}
|
2f2f260eb31b70b049536aad8ade43e9c17947d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <chrono>
#define TILE_DIM 64
void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol);
void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol);
// __global__ void matrixMul(int* A_gpu, int* B_gpu, int* C_gpu, int N) {
// // Row i of matrix C
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// // Column j of matrix C
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int accu = 0;
// if(row<N && col<N) {
// for(int k=0; k<N; k++) {
// accu = accu + A_gpu[row*N+k] * B_gpu[k*N+col];
// }
// C_gpu[row*N+col] = accu;
// }
// }
// Tiled matrix multiply: C = A * B for row-major N x N int matrices.
// Preconditions: square thread block (blockDim.x == blockDim.y) with
// blockDim.x <= TILE_DIM — the shared tiles are statically sized
// TILE_DIM x TILE_DIM and only their top-left blockDim corner is used.
// Fix vs. original: the tile arithmetic used TILE_DIM (64) while the host
// launches 16x16 blocks and the final store already indexed with blockDim,
// so threads consumed shared-memory cells no thread had written and most
// of C was never covered. All indexing is now derived from blockDim.
__global__ void matrixMul(int* A_cpu, int* B_cpu, int* C_cpu, int N)
{
    const int tile = blockDim.x;               // effective tile width
    int accu = 0;
    int row = blockIdx.y * tile + threadIdx.y;
    int col = blockIdx.x * tile + threadIdx.x;

    __shared__ int shd_A[TILE_DIM][TILE_DIM];
    __shared__ int shd_B[TILE_DIM][TILE_DIM];

    for (int t = 0; t < (tile + N - 1) / tile; t++) {
        // Stage one tile of A and one of B, zero-padding past the matrix edge.
        shd_A[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.x < N && row < N) ? A_cpu[row * N + t * tile + threadIdx.x] : 0;
        shd_B[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.y < N && col < N) ? B_cpu[(t * tile + threadIdx.y) * N + col] : 0;
        __syncthreads();                       // tiles fully staged

        for (int k = 0; k < tile; k++)
            accu += shd_A[threadIdx.y][k] * shd_B[k][threadIdx.x];
        __syncthreads();                       // finish reading before restaging
    }

    if (row < N && col < N)
        C_cpu[row * N + col] = accu;
}
// __global__ void matrixMulCol(int* A_gpu, int* B_gpu, int* C_gpu, int N) {
// // Row i of matrix C
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// // Column j of matrix C
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int accu = 0;
// if(row<N && col<N) {
// for(int k=0; k<N; k++) {
// accu = accu + A_gpu[k*N+row] * B_gpu[k*N+col];
// }
// C_gpu[row*N+col] = accu;
// }
// }
// Tiled multiply variant that reads A with swapped indices, i.e. treats A
// as stored column-major (equivalently computes transpose(A) * B for a
// row-major A). B and C are row-major N x N.
// Preconditions: square thread block (blockDim.x == blockDim.y) with
// blockDim.x <= TILE_DIM (tiles statically sized TILE_DIM x TILE_DIM).
// Fix vs. original: tile arithmetic used TILE_DIM (64) while the host
// launches 16x16 blocks, so threads read shared cells no thread had
// written; all indexing is now derived from the actual blockDim.
__global__ void matrixMulCol(int* A_cpu, int* B_cpu, int* C_cpu, int N)
{
    const int tile = blockDim.x;               // effective tile width
    int accu = 0;
    int row = blockIdx.y * tile + threadIdx.y;
    int col = blockIdx.x * tile + threadIdx.x;

    __shared__ int shd_A[TILE_DIM][TILE_DIM];
    __shared__ int shd_B[TILE_DIM][TILE_DIM];

    for (int t = 0; t < (tile + N - 1) / tile; t++) {
        // A is accessed column-wise: element (row, k) lives at A[k*N + row].
        shd_A[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.x < N && row < N) ? A_cpu[(t * tile + threadIdx.x) * N + row] : 0;
        shd_B[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.y < N && col < N) ? B_cpu[(t * tile + threadIdx.y) * N + col] : 0;
        __syncthreads();                       // tiles fully staged

        for (int k = 0; k < tile; k++)
            accu += shd_A[threadIdx.y][k] * shd_B[k][threadIdx.x];
        __syncthreads();                       // finish reading before restaging
    }

    if (row < N && col < N)
        C_cpu[row * N + col] = accu;
}
// Fill x[0..size) with pseudo-random ints in [0, 10).
// Fix vs. original: srand(time(0)) was called on every invocation, so two
// calls within the same second (as main() does for A and B) reseeded the
// generator identically and produced identical arrays. Seed once instead.
void random_ints(int* x, int size)
{
    static bool seeded = false;
    if (!seeded) {
        srand(time(0));
        seeded = true;
    }
    for (int i = 0; i < size; i++) {
        x[i] = rand() % 10;
        //std::cout << x[i] << " ";
    }
}
// Host-side reference implementation of C = A * B.
// All matrices are row-major N x N int arrays; C is fully overwritten.
void matrixMulCPU(int* A_cpu, int* B_cpu, int* C_cpu, int N) {
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            // Dot product of A's row with B's column.
            int acc = 0;
            for (int elm = 0; elm < N; ++elm)
                acc += A_cpu[row * N + elm] * B_cpu[elm * N + col];
            C_cpu[row * N + col] = acc;
        }
    }
}
// Entry point. Usage: prog <N> <gpu|cpu> <MT|NT> <MC|NC>
//   N        matrix dimension (matrices are N x N ints)
//   argv[2]  "gpu" runs the GPU path, anything else the CPU reference
//   argv[3]  "MT" times host<->device transfers together with the kernel
//   argv[4]  "MC" selects the column-access (transposed-A) kernel
// Fix vs. original: argv[1..4] were dereferenced without checking argc,
// so running with fewer than four arguments crashed.
int main(int argc, char* argv[]){
    if (argc < 5) {
        std::cout << "usage: " << argv[0] << " N <gpu|cpu> <MT|NT> <MC|NC>" << std::endl;
        return 1;
    }
    int N = atoi(argv[1]);
    if (N <= 0) {
        std::cout << "N must be a positive integer" << std::endl;
        return 1;
    }
    bool memCol = false;
    if (strcmp(argv[4],"MC")==0) {
        memCol=true;
    }
    int NN = N*N;
    //define A_cpu, B_cpu, C_cpu in the CPU memory
    int *A_cpu, *B_cpu, *C_cpu;
    int size = NN * sizeof(int);   // byte count of one matrix
    // Set up random input matrices; C receives the product.
    A_cpu = (int*)malloc(size); random_ints(A_cpu, NN);
    B_cpu = (int*)malloc(size); random_ints(B_cpu, NN);
    C_cpu = (int*)malloc(size);
    if (strcmp(argv[2],"gpu")==0) {
        if(strcmp(argv[3],"MT")==0) {
            gpuMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol);
        }
        else {
            gpuNoMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol);
        }
    }
    else {
        // CPU reference path, timed with std::chrono.
        auto t1 = std::chrono::high_resolution_clock::now();
        matrixMulCPU(A_cpu, B_cpu, C_cpu, N);
        auto t2 = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
        std::cout << "N: " << N << "\tCPU time: " << duration << "us" << std::endl;
    }
    free(A_cpu); free(B_cpu); free(C_cpu);
    return 0;
}
// GPU matrix multiply, "MT" mode: the H2D copies, the kernel launch and
// the D2H copy are all inside the hipEvent timing window.
// `size` is the byte count of one N x N int matrix; `memCol` selects the
// column-access kernel (matrixMulCol) over the row-access one (matrixMul).
// Elapsed time is reported in microseconds (events report milliseconds).
// NOTE(review): no hip API return codes are checked, and everything runs
// on the default stream.
void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) {
    //define A_gpu, B_gpu, C_gpu in the GPU memory
    //std::cout << "\nMem Tr\n";
    int *A_gpu, *B_gpu, *C_gpu;
    hipMalloc((void **)&A_gpu, size);
    hipMalloc((void **)&B_gpu, size);
    hipMalloc((void **)&C_gpu, size);
    dim3 dimBlock(16, 16);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);   // ceil-div grid
    float time = 0;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    if (memCol==true) {
        //std::cout << "MC\n";
        hipEventRecord( start, 0 );
        // Copy inputs to device (included in the timed region)
        hipMemcpy(A_gpu, A_cpu, size, hipMemcpyHostToDevice);
        hipMemcpy(B_gpu, B_cpu, size, hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( matrixMulCol), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N);
        //memcopy C_gpu to C_cpu
        hipMemcpy(C_cpu, C_gpu, size, hipMemcpyDeviceToHost);
        //stop time
        hipEventRecord( stop, 0 );
    }
    else {
        //std::cout << "nmc\n";
        hipEventRecord( start, 0 );
        // Copy inputs to device (included in the timed region)
        hipMemcpy(A_gpu, A_cpu, size, hipMemcpyHostToDevice);
        hipMemcpy(B_gpu, B_cpu, size, hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N);
        //memcopy C_gpu to C_cpu
        hipMemcpy(C_cpu, C_gpu, size, hipMemcpyDeviceToHost);
        //stop time
        hipEventRecord( stop, 0 );
    }
    hipEventSynchronize( stop );
    hipEventElapsedTime( &time, start, stop );
    hipEventDestroy( start );
    hipEventDestroy( stop );
    hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu);
    float microsec = (time)*1000;   // ms -> us
    std::cout << "N: " << N << "\tMT\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl;
}
// GPU matrix multiply, "NT" mode: host<->device copies happen OUTSIDE the
// hipEvent timing window, so only the kernel itself is timed.
// `size` is the byte count of one N x N int matrix; `memCol` selects the
// column-access kernel (matrixMulCol) over the row-access one (matrixMul).
// Elapsed time is reported in microseconds (events report milliseconds).
// NOTE(review): no hip API return codes are checked, and everything runs
// on the default stream.
void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) {
    //define A_gpu, B_gpu, C_gpu in the GPU memory
    //std::cout << "\nNoMem Tr\n";
    int *A_gpu, *B_gpu, *C_gpu;
    hipMalloc((void **)&A_gpu, size);
    hipMalloc((void **)&B_gpu, size);
    hipMalloc((void **)&C_gpu, size);
    // Copy inputs to device (outside the timed region)
    hipMemcpy(A_gpu, A_cpu, size, hipMemcpyHostToDevice);
    hipMemcpy(B_gpu, B_cpu, size, hipMemcpyHostToDevice);
    dim3 dimBlock(16, 16);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);   // ceil-div grid
    float time = 0;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    if (memCol==true) {
        //std::cout << "MC\n";
        hipEventRecord( start, 0 );
        hipLaunchKernelGGL(( matrixMulCol), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N);
        hipEventRecord( stop, 0 );
    }
    else {
        //std::cout << "nmc\n";
        hipEventRecord( start, 0 );
        hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N);
        hipEventRecord( stop, 0 );
    }
    hipEventSynchronize( stop );
    hipEventElapsedTime( &time, start, stop );
    hipEventDestroy( start );
    hipEventDestroy( stop );
    //memcopy C_gpu to C_cpu
    hipMemcpy(C_cpu, C_gpu, size, hipMemcpyDeviceToHost);
    hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu);
    float microsec = (time)*1000;   // ms -> us
    std::cout << "N: " << N << "\tnt\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl;
}
| 2f2f260eb31b70b049536aad8ade43e9c17947d9.cu | #include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <chrono>
#define TILE_DIM 64
void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol);
void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol);
// __global__ void matrixMul(int* A_gpu, int* B_gpu, int* C_gpu, int N) {
// // Row i of matrix C
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// // Column j of matrix C
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int accu = 0;
// if(row<N && col<N) {
// for(int k=0; k<N; k++) {
// accu = accu + A_gpu[row*N+k] * B_gpu[k*N+col];
// }
// C_gpu[row*N+col] = accu;
// }
// }
// Tiled matrix multiply: C = A * B for row-major N x N int matrices.
// Preconditions: square thread block (blockDim.x == blockDim.y) with
// blockDim.x <= TILE_DIM — the shared tiles are statically sized
// TILE_DIM x TILE_DIM and only their top-left blockDim corner is used.
// Fix vs. original: the tile arithmetic used TILE_DIM (64) while the host
// launches 16x16 blocks and the final store already indexed with blockDim,
// so threads consumed shared-memory cells no thread had written and most
// of C was never covered. All indexing is now derived from blockDim.
__global__ void matrixMul(int* A_cpu, int* B_cpu, int* C_cpu, int N)
{
    const int tile = blockDim.x;               // effective tile width
    int accu = 0;
    int row = blockIdx.y * tile + threadIdx.y;
    int col = blockIdx.x * tile + threadIdx.x;

    __shared__ int shd_A[TILE_DIM][TILE_DIM];
    __shared__ int shd_B[TILE_DIM][TILE_DIM];

    for (int t = 0; t < (tile + N - 1) / tile; t++) {
        // Stage one tile of A and one of B, zero-padding past the matrix edge.
        shd_A[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.x < N && row < N) ? A_cpu[row * N + t * tile + threadIdx.x] : 0;
        shd_B[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.y < N && col < N) ? B_cpu[(t * tile + threadIdx.y) * N + col] : 0;
        __syncthreads();                       // tiles fully staged

        for (int k = 0; k < tile; k++)
            accu += shd_A[threadIdx.y][k] * shd_B[k][threadIdx.x];
        __syncthreads();                       // finish reading before restaging
    }

    if (row < N && col < N)
        C_cpu[row * N + col] = accu;
}
// __global__ void matrixMulCol(int* A_gpu, int* B_gpu, int* C_gpu, int N) {
// // Row i of matrix C
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// // Column j of matrix C
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int accu = 0;
// if(row<N && col<N) {
// for(int k=0; k<N; k++) {
// accu = accu + A_gpu[k*N+row] * B_gpu[k*N+col];
// }
// C_gpu[row*N+col] = accu;
// }
// }
// Tiled multiply variant that reads A with swapped indices, i.e. treats A
// as stored column-major (equivalently computes transpose(A) * B for a
// row-major A). B and C are row-major N x N.
// Preconditions: square thread block (blockDim.x == blockDim.y) with
// blockDim.x <= TILE_DIM (tiles statically sized TILE_DIM x TILE_DIM).
// Fix vs. original: tile arithmetic used TILE_DIM (64) while the host
// launches 16x16 blocks, so threads read shared cells no thread had
// written; all indexing is now derived from the actual blockDim.
__global__ void matrixMulCol(int* A_cpu, int* B_cpu, int* C_cpu, int N)
{
    const int tile = blockDim.x;               // effective tile width
    int accu = 0;
    int row = blockIdx.y * tile + threadIdx.y;
    int col = blockIdx.x * tile + threadIdx.x;

    __shared__ int shd_A[TILE_DIM][TILE_DIM];
    __shared__ int shd_B[TILE_DIM][TILE_DIM];

    for (int t = 0; t < (tile + N - 1) / tile; t++) {
        // A is accessed column-wise: element (row, k) lives at A[k*N + row].
        shd_A[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.x < N && row < N) ? A_cpu[(t * tile + threadIdx.x) * N + row] : 0;
        shd_B[threadIdx.y][threadIdx.x] =
            (t * tile + threadIdx.y < N && col < N) ? B_cpu[(t * tile + threadIdx.y) * N + col] : 0;
        __syncthreads();                       // tiles fully staged

        for (int k = 0; k < tile; k++)
            accu += shd_A[threadIdx.y][k] * shd_B[k][threadIdx.x];
        __syncthreads();                       // finish reading before restaging
    }

    if (row < N && col < N)
        C_cpu[row * N + col] = accu;
}
// Fill x[0..size) with pseudo-random ints in [0, 10).
// Fix vs. original: srand(time(0)) was called on every invocation, so two
// calls within the same second (as main() does for A and B) reseeded the
// generator identically and produced identical arrays. Seed once instead.
void random_ints(int* x, int size)
{
    static bool seeded = false;
    if (!seeded) {
        srand(time(0));
        seeded = true;
    }
    for (int i = 0; i < size; i++) {
        x[i] = rand() % 10;
        //std::cout << x[i] << " ";
    }
}
// Host-side reference implementation of C = A * B.
// All matrices are row-major N x N int arrays; C is fully overwritten.
void matrixMulCPU(int* A_cpu, int* B_cpu, int* C_cpu, int N) {
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            // Dot product of A's row with B's column.
            int acc = 0;
            for (int elm = 0; elm < N; ++elm)
                acc += A_cpu[row * N + elm] * B_cpu[elm * N + col];
            C_cpu[row * N + col] = acc;
        }
    }
}
// Entry point. Usage: prog <N> <gpu|cpu> <MT|NT> <MC|NC>
//   N        matrix dimension (matrices are N x N ints)
//   argv[2]  "gpu" runs the GPU path, anything else the CPU reference
//   argv[3]  "MT" times host<->device transfers together with the kernel
//   argv[4]  "MC" selects the column-access (transposed-A) kernel
// Fix vs. original: argv[1..4] were dereferenced without checking argc,
// so running with fewer than four arguments crashed.
int main(int argc, char* argv[]){
    if (argc < 5) {
        std::cout << "usage: " << argv[0] << " N <gpu|cpu> <MT|NT> <MC|NC>" << std::endl;
        return 1;
    }
    int N = atoi(argv[1]);
    if (N <= 0) {
        std::cout << "N must be a positive integer" << std::endl;
        return 1;
    }
    bool memCol = false;
    if (strcmp(argv[4],"MC")==0) {
        memCol=true;
    }
    int NN = N*N;
    //define A_cpu, B_cpu, C_cpu in the CPU memory
    int *A_cpu, *B_cpu, *C_cpu;
    int size = NN * sizeof(int);   // byte count of one matrix
    // Set up random input matrices; C receives the product.
    A_cpu = (int*)malloc(size); random_ints(A_cpu, NN);
    B_cpu = (int*)malloc(size); random_ints(B_cpu, NN);
    C_cpu = (int*)malloc(size);
    if (strcmp(argv[2],"gpu")==0) {
        if(strcmp(argv[3],"MT")==0) {
            gpuMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol);
        }
        else {
            gpuNoMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol);
        }
    }
    else {
        // CPU reference path, timed with std::chrono.
        auto t1 = std::chrono::high_resolution_clock::now();
        matrixMulCPU(A_cpu, B_cpu, C_cpu, N);
        auto t2 = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
        std::cout << "N: " << N << "\tCPU time: " << duration << "us" << std::endl;
    }
    free(A_cpu); free(B_cpu); free(C_cpu);
    return 0;
}
// GPU matrix multiply, "MT" mode: the H2D copies, the kernel launch and
// the D2H copy are all inside the cudaEvent timing window.
// `size` is the byte count of one N x N int matrix; `memCol` selects the
// column-access kernel (matrixMulCol) over the row-access one (matrixMul).
// Elapsed time is reported in microseconds (events report milliseconds).
// NOTE(review): no CUDA API return codes are checked, and everything runs
// on the default stream.
void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) {
    //define A_gpu, B_gpu, C_gpu in the GPU memory
    //std::cout << "\nMem Tr\n";
    int *A_gpu, *B_gpu, *C_gpu;
    cudaMalloc((void **)&A_gpu, size);
    cudaMalloc((void **)&B_gpu, size);
    cudaMalloc((void **)&C_gpu, size);
    dim3 dimBlock(16, 16);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);   // ceil-div grid
    float time = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (memCol==true) {
        //std::cout << "MC\n";
        cudaEventRecord( start, 0 );
        // Copy inputs to device (included in the timed region)
        cudaMemcpy(A_gpu, A_cpu, size, cudaMemcpyHostToDevice);
        cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice);
        matrixMulCol<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
        //memcopy C_gpu to C_cpu
        cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost);
        //stop time
        cudaEventRecord( stop, 0 );
    }
    else {
        //std::cout << "nmc\n";
        cudaEventRecord( start, 0 );
        // Copy inputs to device (included in the timed region)
        cudaMemcpy(A_gpu, A_cpu, size, cudaMemcpyHostToDevice);
        cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice);
        matrixMul<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
        //memcopy C_gpu to C_cpu
        cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost);
        //stop time
        cudaEventRecord( stop, 0 );
    }
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time, start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu);
    float microsec = (time)*1000;   // ms -> us
    std::cout << "N: " << N << "\tMT\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl;
}
// GPU matrix multiply, "NT" mode: host<->device copies happen OUTSIDE the
// cudaEvent timing window, so only the kernel itself is timed.
// `size` is the byte count of one N x N int matrix; `memCol` selects the
// column-access kernel (matrixMulCol) over the row-access one (matrixMul).
// Elapsed time is reported in microseconds (events report milliseconds).
// NOTE(review): no CUDA API return codes are checked, and everything runs
// on the default stream.
void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) {
    //define A_gpu, B_gpu, C_gpu in the GPU memory
    //std::cout << "\nNoMem Tr\n";
    int *A_gpu, *B_gpu, *C_gpu;
    cudaMalloc((void **)&A_gpu, size);
    cudaMalloc((void **)&B_gpu, size);
    cudaMalloc((void **)&C_gpu, size);
    // Copy inputs to device (outside the timed region)
    cudaMemcpy(A_gpu, A_cpu, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(16, 16);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);   // ceil-div grid
    float time = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (memCol==true) {
        //std::cout << "MC\n";
        cudaEventRecord( start, 0 );
        matrixMulCol<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
        cudaEventRecord( stop, 0 );
    }
    else {
        //std::cout << "nmc\n";
        cudaEventRecord( start, 0 );
        matrixMul<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
        cudaEventRecord( stop, 0 );
    }
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time, start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    //memcopy C_gpu to C_cpu
    cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost);
    cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu);
    float microsec = (time)*1000;   // ms -> us
    std::cout << "N: " << N << "\tnt\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl;
}
|
6c1c602b85229385cdd12521831466fbe848fbfb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuRBM.h"
__constant__ unsigned nCase;
__constant__ float *data_vis, *data_hid;
__constant__ float *data_v_reco[10];
// Park–Miller "minimal standard" LCG: state <- 16807 * state mod (2^31 - 1).
// Advances *seed in place and returns the new state mapped into [0, 1).
__device__ float my_rand(unsigned int *seed) {
    const unsigned long mult = 16807;           // 7^5
    const unsigned long modulus = 2147483647;   // 2^31 - 1 (Mersenne prime)
    unsigned long state = (unsigned long)(*seed);
    state = (mult * state) % modulus;
    *seed = (unsigned int)state;
    return ((float)state) / modulus;
}
// Add the per-unit bias bb[unit] to that unit's column of every case in c.
// Layout: c holds nCase cases of nVH units each (case-major, stride nVH);
// this launch covers the sb units starting at `offset`, one thread per
// unit, each thread striding across the cases.
// Requires >= blockDim.x * sizeof(float) dynamic shared memory.
__global__ void bias(float *c, float *bb, unsigned offset, unsigned nVH, unsigned sb){
    extern __shared__ float vh_bias[];
    if(blockDim.x * blockIdx.x + threadIdx.x < sb){
        unsigned c_idx = offset + blockDim.x * blockIdx.x + threadIdx.x;
        // Each thread reads/writes only its own shared slot, so no barrier is needed.
        vh_bias[threadIdx.x] = bb[c_idx];
        for(; c_idx < nCase * nVH; c_idx += nVH)
            c[c_idx] += vh_bias[threadIdx.x];
    }
}
// Add the per-unit bias, turn each activation into a sigmoid probability,
// and draw a Bernoulli sample (0 or 1) for every case.
// Layout: c holds nCase cases of nVH units each (case-major, stride nVH);
// one thread per unit in the sb-unit slice starting at `offset`.
// Requires >= blockDim.x * sizeof(float) dynamic shared memory.
// Fix vs. original: my_rand was called with &c_idx, so the RNG overwrote
// the loop/element index with its next LCG state — corrupting both the
// address being written and the iteration. The RNG now has its own state.
__global__ void biasSampling(float *c, float *bb, unsigned offset, unsigned nVH, unsigned sb){
    extern __shared__ float vh_bias[];
    if(blockDim.x * blockIdx.x + threadIdx.x < sb){
        unsigned c_idx = offset + blockDim.x * blockIdx.x + threadIdx.x;
        vh_bias[threadIdx.x] = bb[c_idx];
        unsigned seed = c_idx;   // per-thread RNG state, seeded by the unique start index
        for(; c_idx < nCase * nVH; c_idx += nVH){
            float p = 1/(1 + exp(-c[c_idx] - vh_bias[threadIdx.x]));   // sigmoid activation
            if(my_rand(&seed) > p)
                c[c_idx] = 0;
            else
                c[c_idx] = 1;
        }
    }
}
// Element-wise sum of the ns per-stream partial visible reconstructions
// (device pointers published in the __constant__ array data_v_reco) into c.
// Grid-stride loop, so any launch configuration covers all len elements.
__global__ void sumUpVisReco(int ns, unsigned len, float *c){
    for(unsigned i = blockDim.x * blockIdx.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x){
        float s =.0;
        for(int j = 0; j < ns; ++ j)
            s += *(data_v_reco[j] + i);
        c[i] = s;
    }
}
float *d_weight, *d_a, *d_b;
float *d_data_v, *d_data_h, *d_data_v_reco, * d_data_h_reco;
float *dev_data_v_reco[10];
float *d_vis_data, *d_vis_reco, *d_hid_data, *d_hid_reco, *d_ones;
const float alpha = 1.0f;
const float beta = .0f;
const float beta_one = 1.0f;
unsigned currentBatch;
//const float learn_rate = 0.0001;
const float learn_rate = 10;
//const float learn_rate_neg = -0.0001;
const float learn_rate_neg = -10;
hipblasHandle_t handle;
hiprandGenerator_t gen;
hipStream_t *strm;
hipEvent_t *evt;
void deviceMemoryAlloc();
void deviceMemoryFree();
//unsigned copyMiniBatchToDevice(int idx_batch, hipStream_t *s){
// Upload one mini-batch of training cases to d_data_v and publish the
// actual batch size (clipped at the end of the dataset) to the __constant__
// symbol nCase. Returns that batch size.
// NOTE(review): idx_batch is an instance (case) index, not a batch ordinal —
// the host offset is idx_batch * nvisible. Commented lines are the remains
// of a per-stream async variant.
unsigned copyMiniBatchToDevice(int idx_batch){
    /* copy mini batch */
    unsigned nBatch = h_miniBatch > (ninst - idx_batch)? (ninst - idx_batch): h_miniBatch;
    //CUBLAS_HANDLE_ERROR(hipblasSetStream(handle, *s));
    CUBLAS_HANDLE_ERROR(hipblasSetMatrix(nvisible, nBatch, sizeof(float),
        h_data + idx_batch * nvisible, nvisible, d_data_v, nvisible));
    //HANDLE_ERROR(hipMemcpyToSymbolAsync(nCase, &nBatch, sizeof(unsigned), 0,
    //  hipMemcpyHostToDevice, *s));
    HANDLE_ERROR(hipMemcpyToSymbol(nCase, &nBatch, sizeof(unsigned), 0,
        hipMemcpyHostToDevice));
    return nBatch;
}
/*
void calcUnits(unsigned nunits, float *dev_data, float *b, int sampled, hipStream_t *s){
//dim3 g(currentBatch, (nunits- 1)/256 + 1);
if(sampled)
addBiasAndSampling<<<(nunits- 1)/256 + 1, 256, 256*sizeof(float), *s>>>(nunits, dev_data, b);
else
addBias<<<(nunits- 1)/256 + 1, 256, 256*sizeof(float), *s>>>(nunits, dev_data, b);
hipError_t ret = hipGetLastError();
HANDLE_ERROR(ret);
}
*/
// Batch-averaged activation of every visible and hidden unit:
//   dev_v = (1/currentBatch) * V * ones,  dev_h = (1/currentBatch) * H * ones
// computed as a GEMV of each case-major data matrix against the all-ones
// vector d_ones (beta = 0, so outputs are overwritten).
void calcViHj(float *dev_v, float *dev_h){
    /* calculate (Hi)data/reco and (Vi)data/reco */
    const float avg_alpha = 1.0/currentBatch;
    hipblasStatus_t ret;
    ret = hipblasSgemv(handle, HIPBLAS_OP_N, nvisible, currentBatch, &avg_alpha, d_data_v, nvisible, d_ones, 1, &beta, dev_v, 1);
    CUBLAS_HANDLE_ERROR(ret);
    ret = hipblasSgemv(handle, HIPBLAS_OP_N, nhidden, currentBatch, &avg_alpha, d_data_h, nhidden, d_ones, 1, &beta, dev_h, 1);
    CUBLAS_HANDLE_ERROR(ret);
}
// Batch-averaged activation for a slice of `len` units starting at global
// unit index `offset`, for the data/reconstruction matrix selected by `u`:
//   result[offset..offset+len) = (1/currentBatch) * Data_slice * ones
// Results land at the same global offset inside the matching d_*_data /
// d_*_reco vector.
// NOTE(review): HIDDEN_RECO reads d_data_h (not a dedicated reco buffer) —
// presumably intentional, since phase2 recomputes the hidden activations
// from the reconstruction in place and d_data_h_reco's allocation is
// commented out; confirm.
void calcVHij(unit_t u, unsigned offset, unsigned len){
    /* calculate (Hi)data/reco and (Vi)data/reco */
    const float avg_alpha = 1.0/currentBatch;
    float *vhij, *dev_data_vh;
    int stride;
    switch (u){
        case VISIBLE:
            vhij = d_vis_data + offset;
            dev_data_vh = d_data_v + offset;
            stride = nvisible;
            break;
        case HIDDEN:
            vhij = d_hid_data + offset;
            dev_data_vh = d_data_h + offset;
            stride = nhidden;
            break;
        case VISIBLE_RECO:
            vhij = d_vis_reco + offset;
            dev_data_vh = d_data_v_reco + offset;
            stride = nvisible;
            break;
        case HIDDEN_RECO:
            vhij = d_hid_reco + offset;
            dev_data_vh = d_data_h + offset;
            stride = nhidden;
            break;
        default:
            // NOTE(review): falling through leaves vhij/dev_data_vh
            // uninitialized; unreachable as long as u covers the enum.
            break;
    }
    hipblasStatus_t ret;
    ret = hipblasSgemv(handle, HIPBLAS_OP_N, len, currentBatch, &avg_alpha, dev_data_vh, stride, d_ones, 1, &beta, vhij, 1);
    CUBLAS_HANDLE_ERROR(ret);
}
// Allocate every device buffer used during training and upload the initial
// biases and the all-ones helper vector.
// Assumes h_miniBatch, nvisible, nhidden, nStream, streamBatch, h_a and h_b
// are already initialized. Per-stream partial reconstruction pointers are
// also published to the __constant__ array data_v_reco so kernels can
// reach them without extra arguments.
void deviceMemoryAlloc(){
    // allocate for visible & hidden data
    HANDLE_ERROR(hipMalloc((void **)&d_data_v, h_miniBatch * nvisible * sizeof(float)));
    //HANDLE_ERROR(hipMemcpyToSymbol(data_vis, &d_data_v, sizeof(float *), 0, hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMalloc((void **)&d_data_h, h_miniBatch * nhidden * sizeof(float)));
    //HANDLE_ERROR(hipMemcpyToSymbol(data_hid, &d_data_h, sizeof(float *), 0, hipMemcpyHostToDevice));
    for(int j = 0; j < nStream; ++ j)
        HANDLE_ERROR(hipMalloc((void **)&dev_data_v_reco[j], h_miniBatch * nvisible * sizeof(float)));
    HANDLE_ERROR(hipMemcpyToSymbol(data_v_reco, &dev_data_v_reco, nStream * sizeof(float *), 0, hipMemcpyHostToDevice));
    // allocate for vis/hid reconstruction
    HANDLE_ERROR(hipMalloc((void **)&d_data_v_reco, h_miniBatch * nvisible * sizeof(float)));
    //HANDLE_ERROR(hipMalloc((void **)&d_data_h_reco, h_miniBatch * nhidden * sizeof(float)));
    // weights: one slice of streamBatch hidden units' columns per stream
    HANDLE_ERROR(hipMalloc((void **)&d_weight, nvisible * nStream * streamBatch * sizeof(float)));
    // bias to global memory
    HANDLE_ERROR(hipMalloc((void **)&d_a, nvisible * sizeof(float)));
    HANDLE_ERROR(hipMemcpy(d_a, h_a, nvisible * sizeof(float), hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMalloc((void **)&d_b, nhidden * sizeof(float)));
    HANDLE_ERROR(hipMemcpy(d_b, h_b, nhidden * sizeof(float), hipMemcpyHostToDevice));
    // allocate & copy ones vector (used by the averaging GEMVs)
    float *h_ones = (float *)malloc(h_miniBatch * sizeof(float));
    fill_n (h_ones, h_miniBatch, 1);
    HANDLE_ERROR(hipMalloc((void **)&d_ones, h_miniBatch * sizeof(float)));
    HANDLE_ERROR(hipMemcpy(d_ones, h_ones, h_miniBatch * sizeof(float), hipMemcpyHostToDevice));
    free(h_ones);
    // allocate for Vi Hj (batch-averaged unit activations)
    HANDLE_ERROR(hipMalloc((void **)&d_vis_data, nvisible * sizeof(float)));
    HANDLE_ERROR(hipMalloc((void **)&d_vis_reco, nvisible * sizeof(float)));
    HANDLE_ERROR(hipMalloc((void **)&d_hid_data, nhidden * sizeof(float)));
    HANDLE_ERROR(hipMalloc((void **)&d_hid_reco, nhidden * sizeof(float)));
}
// Release every device allocation made by deviceMemoryAlloc().
// Fix vs. original: d_data_v_reco and the per-stream partial reconstruction
// buffers dev_data_v_reco[0..nStream-1] were never freed (leaked).
void deviceMemoryFree(){
    HANDLE_ERROR(hipFree(d_data_v));
    HANDLE_ERROR(hipFree(d_data_h));
    HANDLE_ERROR(hipFree(d_data_v_reco));
    for(int j = 0; j < nStream; ++ j)
        HANDLE_ERROR(hipFree(dev_data_v_reco[j]));
    HANDLE_ERROR(hipFree(d_weight));
    HANDLE_ERROR(hipFree(d_a));
    HANDLE_ERROR(hipFree(d_b));
    //HANDLE_ERROR(hipFree(d_rand));
    HANDLE_ERROR(hipFree(d_ones));
    HANDLE_ERROR(hipFree(d_vis_data));
    HANDLE_ERROR(hipFree(d_hid_data));
    HANDLE_ERROR(hipFree(d_vis_reco));
    HANDLE_ERROR(hipFree(d_hid_reco));
}
// Contrastive-divergence update for a slice of `len` bias terms starting
// at unit `offset`:
//   bias[offset..offset+len) += lr * <unit>_data - lr * <unit>_reco
// where the _data/_reco vectors hold batch-averaged activations written by
// calcVHij at the same global unit positions.
// Fixes vs. original:
//  * the HIDDEN branch paired d_hid_data with d_vis_reco (copy/paste bug),
//    updating hidden biases against the visible reconstruction vector;
//  * d_data/d_reco were not advanced by `offset`, so slices at offset > 0
//    were updated with unit 0's statistics — all three pointers must be
//    offset alike to line up with d_bias.
void updateBias(unit_t u, unsigned offset, unsigned len){
    float *d_bias, *d_data, *d_reco;
    if(u == VISIBLE){
        d_bias = d_a + offset;
        d_data = d_vis_data + offset;
        d_reco = d_vis_reco + offset;
    }
    else{
        d_bias = d_b + offset;
        d_data = d_hid_data + offset;
        d_reco = d_hid_reco + offset;
    }
    hipblasStatus_t ret;
    ret = hipblasSaxpy(handle, len, &learn_rate, d_data, 1, d_bias, 1);
    CUBLAS_HANDLE_ERROR(ret);
    ret = hipblasSaxpy(handle, len, &learn_rate_neg, d_reco, 1, d_bias, 1);
    CUBLAS_HANDLE_ERROR(ret);
}
// Contrastive-divergence weight update for the slice of `len` hidden units
// starting at `offset`:
//   W_slice += lr * v_data * h_data^T - lr * v_reco * h_reco^T
// implemented as two rank-1 GEMMs (k = 1) on the batch-averaged unit
// vectors. dev_w points at the device-resident nvisible x len weight slice.
// NOTE(review): d_vis_data/d_vis_reco are used from index 0 — assumes the
// full visible averages were computed for this batch; confirm callers.
void updateWeight(int offset, int len, float *dev_w){
    hipblasStatus_t ret;
    ret = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
        nvisible, len, 1, &learn_rate,
        d_vis_data, nvisible, d_hid_data + offset,
        len, &beta_one, dev_w, nvisible);
    CUBLAS_HANDLE_ERROR(ret);
    ret = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
        nvisible, len, 1, &learn_rate_neg,
        d_vis_reco, nvisible, d_hid_reco + offset,
        len, &beta_one, dev_w, nvisible);
    CUBLAS_HANDLE_ERROR(ret);
}
// Positive phase of CD-1 plus the start of the reconstruction, for the
// hidden-unit slices owned by stream `idx_strm`. For each slice of up to
// `streamBatch` hidden units (strided across the nStream streams):
//   1. stream the slice's weight columns host -> device,
//   2. hidden = W^T * v for the current mini-batch (GEMM), plus bias,
//   3. accumulate the batch-averaged H_j_data (calcVHij),
//   4. add this slice's contribution W * h to the stream's partial visible
//      reconstruction.
// NOTE(review): the cuBLAS calls run on the handle's current stream while
// the weight copy uses strm[idx_strm]; ordering between them relies on the
// caller's synchronization — confirm.
void phase1_TillVisibleRecon(int idx_strm){
    unsigned currentStreamBatch;
    float *d_weight_strm = d_weight + idx_strm * streamBatch * nvisible;
    hipblasStatus_t ret;
    for(unsigned streamBatch_start = idx_strm * streamBatch; streamBatch_start < nhidden; streamBatch_start += nStream * streamBatch){
        /* calculate starting position and length */
        if(streamBatch_start + streamBatch > nhidden)
            currentStreamBatch = nhidden - streamBatch_start;
        else
            currentStreamBatch = streamBatch;
        /* copy partial weights */
        float *h_weight_strm = h_weight + streamBatch_start * nvisible;
        CUBLAS_HANDLE_ERROR(hipblasSetMatrixAsync(nvisible, currentStreamBatch,
            sizeof(float), h_weight_strm, nvisible, d_weight_strm, nvisible, strm[idx_strm]));
        /* matrix multiplication for hidden units calculation */
        float *d_data_h_strm = d_data_h + streamBatch_start;
        ret = hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
            currentStreamBatch, currentBatch, nvisible, &alpha,
            d_weight_strm, nvisible, d_data_v, nvisible, &beta, d_data_h_strm, nhidden);
        CUBLAS_HANDLE_ERROR(ret);
        /* add bias (note: launches the non-sampling `bias` kernel) */
        hipLaunchKernelGGL(( bias), dim3((currentStreamBatch - 1)/256 + 1), dim3(256), 256*sizeof(float), strm[idx_strm], d_data_h, d_b, streamBatch_start, nhidden, currentStreamBatch);
        hipError_t cuda_ret = hipGetLastError();
        HANDLE_ERROR(cuda_ret);
        /* calculate H_j_data */
        calcVHij(HIDDEN, streamBatch_start, currentStreamBatch);
        /* partially reconstruct visible units: the first slice this stream
           handles overwrites its partial buffer (beta = 0), later slices
           accumulate into it (beta = 1) */
        if(streamBatch_start < nStream * streamBatch)
            ret = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                nvisible, currentBatch, currentStreamBatch, &alpha,
                d_weight_strm, nvisible, d_data_h_strm, nhidden, &beta, dev_data_v_reco[idx_strm], nvisible);
        else
            ret = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                nvisible, currentBatch, currentStreamBatch, &alpha,
                d_weight_strm, nvisible, d_data_h_strm, nhidden, &beta_one, dev_data_v_reco[idx_strm], nvisible);
        CUBLAS_HANDLE_ERROR(ret);
    }
}
void phase2(int idx_strm){
unsigned currentStreamBatch;
float *d_weight_strm = d_weight + idx_strm * nvisible * streamBatch;
for(unsigned streamBatch_start = idx_strm * streamBatch; streamBatch_start < nhidden; streamBatch_start += nStream * streamBatch){
/* calculate starting position and length */
if(streamBatch_start + streamBatch > nhidden)
currentStreamBatch = nhidden - streamBatch_start;
else
currentStreamBatch = streamBatch;
/* copy partial weights */
float *h_weight_strm = h_weight + streamBatch_start * nvisible;
CUBLAS_HANDLE_ERROR(hipblasSetMatrixAsync(nvisible, currentStreamBatch,
sizeof(float), h_weight_strm, nvisible, d_weight_strm, nvisible, strm[idx_strm]));
/* matrix multiplication for hidden units calculation */
float *d_data_h_strm = d_data_h + streamBatch_start;
hipblasStatus_t ret = hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
currentStreamBatch, currentBatch, nvisible, &alpha,
d_weight_strm, nvisible, d_data_v_reco, nvisible, &beta, d_data_h_strm, nhidden);
CUBLAS_HANDLE_ERROR(ret);
/* add bias and sampling */
hipLaunchKernelGGL(( bias), dim3((currentStreamBatch - 1)/256 + 1), dim3(256), 256*sizeof(float), strm[idx_strm], d_data_h, d_b, streamBatch_start, nhidden, currentStreamBatch);
hipError_t cuda_ret = hipGetLastError();
HANDLE_ERROR(cuda_ret);
/* calculate H_j_reco */
calcVHij(HIDDEN_RECO, streamBatch_start, currentStreamBatch);
/* update bias for hidden */
updateBias(HIDDEN, streamBatch_start, currentStreamBatch);
/* update weights */
updateWeight(streamBatch_start, currentStreamBatch, d_weight_strm);
/* copy the new weights back to host */
CUBLAS_HANDLE_ERROR(hipblasGetMatrixAsync(nvisible, currentStreamBatch,
sizeof(float), d_weight_strm, nvisible, h_weight_strm, nvisible, strm[idx_strm]));
}
}
void cublasRunRBM(){
// data
float *h_data_h = (float *)malloc(sizeof(float) * nhidden* nvisible);
float msecTotal = 0.0f;
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, NULL));
hipblasStatus_t ret;
ret = hipblasCreate(&handle);
CUBLAS_HANDLE_ERROR(ret);
deviceMemoryAlloc();
/* initialize streams and events */
strm = (hipStream_t *)malloc(nStream * sizeof(hipStream_t));
evt = (hipEvent_t *)malloc(nStream * sizeof(hipEvent_t));
for(int j = 0; j < nStream; ++ j){
HANDLE_ERROR(hipStreamCreate(&strm[j]));
HANDLE_ERROR(hipEventCreate(&evt[j]));
}
/* main loop over all samples by mini-batch */
for(unsigned i = 0; i < ninst; i += h_miniBatch){
/* copy mini-batch in default stream */
CUBLAS_HANDLE_ERROR(hipblasSetStream(handle, NULL));
currentBatch = copyMiniBatchToDevice(i);
/* sync for mini-batch copy */
hipDeviceSynchronize();
/* calculate V_i_data */
calcVHij(VISIBLE, 0, nvisible);
/* concurrent streams */
for(int j = 0; j < nStream; ++ j){
CUBLAS_HANDLE_ERROR(hipblasSetStream(handle, strm[j]));
phase1_TillVisibleRecon(j);
}
/* sync for visible recon matrix by all streams and sum up
return to default NULL stream, implicit sync */
int streamUsed;
if(1.0*nhidden/streamBatch > (nStream - 1))
streamUsed = nStream;
else
streamUsed = (nhidden - 1)/streamBatch + 1;
hipLaunchKernelGGL(( sumUpVisReco), dim3((currentBatch * nvisible)/256 + 1), dim3(256), 0, 0, streamUsed, currentBatch * nvisible, d_data_v_reco);
hipLaunchKernelGGL(( bias), dim3((nvisible - 1)/256 + 1), dim3(256), 256*sizeof(float), 0, d_data_v_reco, d_a, 0, nvisible, nvisible);
//hipDeviceSynchronize();
hipError_t cuda_ret = hipGetLastError();
HANDLE_ERROR(cuda_ret);
/* calculate V_i_reco */
CUBLAS_HANDLE_ERROR(hipblasSetStream(handle, NULL));
calcVHij(VISIBLE_RECO, 0, nvisible);
/* update bias for visible */
updateBias(VISIBLE, 0, nvisible);
hipDeviceSynchronize();
/* concurrent streams */
for(int j = 0; j < nStream; ++ j){
CUBLAS_HANDLE_ERROR(hipblasSetStream(handle, strm[j]));
phase2(j);
}
}
hipDeviceSynchronize();
hipblasDestroy(handle);
//unsigned row = currentBatch;
//unsigned row = 1;
//HANDLE_ERROR(hipMemcpy(h_data_h, d_weight, sizeof(float)*row*col, hipMemcpyDeviceToHost));
//printArray(h_data_h, row, col);
//printArray(h_weight, row, col);
//printArray(eigen_data_h, row, col);
//cout << "sqare norm: " << sqn(h_data_h, eigen_data_h, row * col) << endl;
/*
unsigned row = nvisible;
unsigned col = nhidden;
cout << "sqare norm: " << sqn(h_weight, eigen_data_h, row * col) << endl;
*/
HANDLE_ERROR(hipEventRecord(stop, NULL));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
printf("\tcublas: %.2f msec\n", msecTotal);
deviceMemoryFree();
free(h_data_h);
}
| 6c1c602b85229385cdd12521831466fbe848fbfb.cu | #include "cuRBM.h"
__constant__ unsigned nCase;
__constant__ float *data_vis, *data_hid;
__constant__ float *data_v_reco[10];
__device__ float my_rand(unsigned int *seed) {
// constants for random no gen.
unsigned long a = 16807;
unsigned long m = 2147483647; // 2^31 - 1
unsigned long x = (unsigned long) *seed;
x = (a * x)%m;
*seed = (unsigned int) x;
return ((float)x)/m;
}
__global__ void bias(float *c, float *bb, unsigned offset, unsigned nVH, unsigned sb){
extern __shared__ float vh_bias[];
if(blockDim.x * blockIdx.x + threadIdx.x < sb){
unsigned c_idx = offset + blockDim.x * blockIdx.x + threadIdx.x;
vh_bias[threadIdx.x] = bb[c_idx];
for(; c_idx < nCase * nVH; c_idx += nVH)
c[c_idx] += vh_bias[threadIdx.x];
}
}
__global__ void biasSampling(float *c, float *bb, unsigned offset, unsigned nVH, unsigned sb){
extern __shared__ float vh_bias[];
if(blockDim.x * blockIdx.x + threadIdx.x < sb){
unsigned c_idx = offset + blockDim.x * blockIdx.x + threadIdx.x;
vh_bias[threadIdx.x] = bb[c_idx];
for(; c_idx < nCase * nVH; c_idx += nVH){
if(my_rand(&c_idx) > 1/(1 + exp(-c[c_idx] - vh_bias[threadIdx.x])))
c[c_idx] = 0;
else
c[c_idx] = 1;
}
}
}
__global__ void sumUpVisReco(int ns, unsigned len, float *c){
for(unsigned i = blockDim.x * blockIdx.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x){
float s =.0;
for(int j = 0; j < ns; ++ j)
s += *(data_v_reco[j] + i);
c[i] = s;
}
}
float *d_weight, *d_a, *d_b;
float *d_data_v, *d_data_h, *d_data_v_reco, * d_data_h_reco;
float *dev_data_v_reco[10];
float *d_vis_data, *d_vis_reco, *d_hid_data, *d_hid_reco, *d_ones;
const float alpha = 1.0f;
const float beta = .0f;
const float beta_one = 1.0f;
unsigned currentBatch;
//const float learn_rate = 0.0001;
const float learn_rate = 10;
//const float learn_rate_neg = -0.0001;
const float learn_rate_neg = -10;
cublasHandle_t handle;
curandGenerator_t gen;
cudaStream_t *strm;
cudaEvent_t *evt;
void deviceMemoryAlloc();
void deviceMemoryFree();
//unsigned copyMiniBatchToDevice(int idx_batch, cudaStream_t *s){
unsigned copyMiniBatchToDevice(int idx_batch){
/* copy mini batch */
unsigned nBatch = h_miniBatch > (ninst - idx_batch)? (ninst - idx_batch): h_miniBatch;
//CUBLAS_HANDLE_ERROR(cublasSetStream(handle, *s));
CUBLAS_HANDLE_ERROR(cublasSetMatrix(nvisible, nBatch, sizeof(float),
h_data + idx_batch * nvisible, nvisible, d_data_v, nvisible));
//HANDLE_ERROR(cudaMemcpyToSymbolAsync(nCase, &nBatch, sizeof(unsigned), 0,
// cudaMemcpyHostToDevice, *s));
HANDLE_ERROR(cudaMemcpyToSymbol(nCase, &nBatch, sizeof(unsigned), 0,
cudaMemcpyHostToDevice));
return nBatch;
}
/*
void calcUnits(unsigned nunits, float *dev_data, float *b, int sampled, cudaStream_t *s){
//dim3 g(currentBatch, (nunits- 1)/256 + 1);
if(sampled)
addBiasAndSampling<<<(nunits- 1)/256 + 1, 256, 256*sizeof(float), *s>>>(nunits, dev_data, b);
else
addBias<<<(nunits- 1)/256 + 1, 256, 256*sizeof(float), *s>>>(nunits, dev_data, b);
cudaError_t ret = cudaGetLastError();
HANDLE_ERROR(ret);
}
*/
void calcViHj(float *dev_v, float *dev_h){
/* calculate (Hi)data/reco and (Vi)data/reco */
const float avg_alpha = 1.0/currentBatch;
cublasStatus_t ret;
ret = cublasSgemv(handle, CUBLAS_OP_N, nvisible, currentBatch, &avg_alpha, d_data_v, nvisible, d_ones, 1, &beta, dev_v, 1);
CUBLAS_HANDLE_ERROR(ret);
ret = cublasSgemv(handle, CUBLAS_OP_N, nhidden, currentBatch, &avg_alpha, d_data_h, nhidden, d_ones, 1, &beta, dev_h, 1);
CUBLAS_HANDLE_ERROR(ret);
}
void calcVHij(unit_t u, unsigned offset, unsigned len){
/* calculate (Hi)data/reco and (Vi)data/reco */
const float avg_alpha = 1.0/currentBatch;
float *vhij, *dev_data_vh;
int stride;
switch (u){
case VISIBLE:
vhij = d_vis_data + offset;
dev_data_vh = d_data_v + offset;
stride = nvisible;
break;
case HIDDEN:
vhij = d_hid_data + offset;
dev_data_vh = d_data_h + offset;
stride = nhidden;
break;
case VISIBLE_RECO:
vhij = d_vis_reco + offset;
dev_data_vh = d_data_v_reco + offset;
stride = nvisible;
break;
case HIDDEN_RECO:
vhij = d_hid_reco + offset;
dev_data_vh = d_data_h + offset;
stride = nhidden;
break;
default:
break;
}
cublasStatus_t ret;
ret = cublasSgemv(handle, CUBLAS_OP_N, len, currentBatch, &avg_alpha, dev_data_vh, stride, d_ones, 1, &beta, vhij, 1);
CUBLAS_HANDLE_ERROR(ret);
}
void deviceMemoryAlloc(){
// allocate for visible & hidden data
HANDLE_ERROR(cudaMalloc((void **)&d_data_v, h_miniBatch * nvisible * sizeof(float)));
//HANDLE_ERROR(cudaMemcpyToSymbol(data_vis, &d_data_v, sizeof(float *), 0, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **)&d_data_h, h_miniBatch * nhidden * sizeof(float)));
//HANDLE_ERROR(cudaMemcpyToSymbol(data_hid, &d_data_h, sizeof(float *), 0, cudaMemcpyHostToDevice));
for(int j = 0; j < nStream; ++ j)
HANDLE_ERROR(cudaMalloc((void **)&dev_data_v_reco[j], h_miniBatch * nvisible * sizeof(float)));
HANDLE_ERROR(cudaMemcpyToSymbol(data_v_reco, &dev_data_v_reco, nStream * sizeof(float *), 0, cudaMemcpyHostToDevice));
// allocate for vis/hid reconstruction
HANDLE_ERROR(cudaMalloc((void **)&d_data_v_reco, h_miniBatch * nvisible * sizeof(float)));
//HANDLE_ERROR(cudaMalloc((void **)&d_data_h_reco, h_miniBatch * nhidden * sizeof(float)));
// weights
HANDLE_ERROR(cudaMalloc((void **)&d_weight, nvisible * nStream * streamBatch * sizeof(float)));
// bias to global memory
HANDLE_ERROR(cudaMalloc((void **)&d_a, nvisible * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(d_a, h_a, nvisible * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **)&d_b, nhidden * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(d_b, h_b, nhidden * sizeof(float), cudaMemcpyHostToDevice));
// allocate & copy ones vector
float *h_ones = (float *)malloc(h_miniBatch * sizeof(float));
fill_n (h_ones, h_miniBatch, 1);
HANDLE_ERROR(cudaMalloc((void **)&d_ones, h_miniBatch * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(d_ones, h_ones, h_miniBatch * sizeof(float), cudaMemcpyHostToDevice));
free(h_ones);
// allocate for Vi Hj
HANDLE_ERROR(cudaMalloc((void **)&d_vis_data, nvisible * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void **)&d_vis_reco, nvisible * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void **)&d_hid_data, nhidden * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void **)&d_hid_reco, nhidden * sizeof(float)));
}
void deviceMemoryFree(){
HANDLE_ERROR(cudaFree(d_data_v));
HANDLE_ERROR(cudaFree(d_data_h));
HANDLE_ERROR(cudaFree(d_weight));
HANDLE_ERROR(cudaFree(d_a));
HANDLE_ERROR(cudaFree(d_b));
//HANDLE_ERROR(cudaFree(d_rand));
HANDLE_ERROR(cudaFree(d_ones));
HANDLE_ERROR(cudaFree(d_vis_data));
HANDLE_ERROR(cudaFree(d_hid_data));
HANDLE_ERROR(cudaFree(d_vis_reco));
HANDLE_ERROR(cudaFree(d_hid_reco));
}
void updateBias(unit_t u, unsigned offset, unsigned len){
float *d_bias, *d_data, *d_reco;
if(u == VISIBLE){
d_bias = d_a + offset;
d_data = d_vis_data;
d_reco = d_vis_reco;
}
else{
d_bias = d_b + offset;
d_data = d_hid_data;
d_reco = d_vis_reco;
}
cublasStatus_t ret;
ret = cublasSaxpy(handle, len, &learn_rate, d_data, 1, d_bias, 1);
CUBLAS_HANDLE_ERROR(ret);
ret = cublasSaxpy(handle, len, &learn_rate_neg, d_reco, 1, d_bias, 1);
CUBLAS_HANDLE_ERROR(ret);
}
void updateWeight(int offset, int len, float *dev_w){
cublasStatus_t ret;
ret = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T,
nvisible, len, 1, &learn_rate,
d_vis_data, nvisible, d_hid_data + offset,
len, &beta_one, dev_w, nvisible);
CUBLAS_HANDLE_ERROR(ret);
ret = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T,
nvisible, len, 1, &learn_rate_neg,
d_vis_reco, nvisible, d_hid_reco + offset,
len, &beta_one, dev_w, nvisible);
CUBLAS_HANDLE_ERROR(ret);
}
void phase1_TillVisibleRecon(int idx_strm){
unsigned currentStreamBatch;
float *d_weight_strm = d_weight + idx_strm * streamBatch * nvisible;
cublasStatus_t ret;
for(unsigned streamBatch_start = idx_strm * streamBatch; streamBatch_start < nhidden; streamBatch_start += nStream * streamBatch){
/* calculate starting position and length */
if(streamBatch_start + streamBatch > nhidden)
currentStreamBatch = nhidden - streamBatch_start;
else
currentStreamBatch = streamBatch;
/* copy partial weights */
float *h_weight_strm = h_weight + streamBatch_start * nvisible;
CUBLAS_HANDLE_ERROR(cublasSetMatrixAsync(nvisible, currentStreamBatch,
sizeof(float), h_weight_strm, nvisible, d_weight_strm, nvisible, strm[idx_strm]));
/* matrix multiplication for hidden units calculation */
float *d_data_h_strm = d_data_h + streamBatch_start;
ret = cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
currentStreamBatch, currentBatch, nvisible, &alpha,
d_weight_strm, nvisible, d_data_v, nvisible, &beta, d_data_h_strm, nhidden);
CUBLAS_HANDLE_ERROR(ret);
/* add bias and sampling */
bias<<<(currentStreamBatch - 1)/256 + 1, 256, 256*sizeof(float), strm[idx_strm]>>>(d_data_h, d_b, streamBatch_start, nhidden, currentStreamBatch);
cudaError_t cuda_ret = cudaGetLastError();
HANDLE_ERROR(cuda_ret);
/* calculate H_j_data */
calcVHij(HIDDEN, streamBatch_start, currentStreamBatch);
/* partially reconstruct visible units */
if(streamBatch_start < nStream * streamBatch)
ret = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
nvisible, currentBatch, currentStreamBatch, &alpha,
d_weight_strm, nvisible, d_data_h_strm, nhidden, &beta, dev_data_v_reco[idx_strm], nvisible);
else
ret = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
nvisible, currentBatch, currentStreamBatch, &alpha,
d_weight_strm, nvisible, d_data_h_strm, nhidden, &beta_one, dev_data_v_reco[idx_strm], nvisible);
CUBLAS_HANDLE_ERROR(ret);
}
}
void phase2(int idx_strm){
unsigned currentStreamBatch;
float *d_weight_strm = d_weight + idx_strm * nvisible * streamBatch;
for(unsigned streamBatch_start = idx_strm * streamBatch; streamBatch_start < nhidden; streamBatch_start += nStream * streamBatch){
/* calculate starting position and length */
if(streamBatch_start + streamBatch > nhidden)
currentStreamBatch = nhidden - streamBatch_start;
else
currentStreamBatch = streamBatch;
/* copy partial weights */
float *h_weight_strm = h_weight + streamBatch_start * nvisible;
CUBLAS_HANDLE_ERROR(cublasSetMatrixAsync(nvisible, currentStreamBatch,
sizeof(float), h_weight_strm, nvisible, d_weight_strm, nvisible, strm[idx_strm]));
/* matrix multiplication for hidden units calculation */
float *d_data_h_strm = d_data_h + streamBatch_start;
cublasStatus_t ret = cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
currentStreamBatch, currentBatch, nvisible, &alpha,
d_weight_strm, nvisible, d_data_v_reco, nvisible, &beta, d_data_h_strm, nhidden);
CUBLAS_HANDLE_ERROR(ret);
/* add bias and sampling */
bias<<<(currentStreamBatch - 1)/256 + 1, 256, 256*sizeof(float), strm[idx_strm]>>>(d_data_h, d_b, streamBatch_start, nhidden, currentStreamBatch);
cudaError_t cuda_ret = cudaGetLastError();
HANDLE_ERROR(cuda_ret);
/* calculate H_j_reco */
calcVHij(HIDDEN_RECO, streamBatch_start, currentStreamBatch);
/* update bias for hidden */
updateBias(HIDDEN, streamBatch_start, currentStreamBatch);
/* update weights */
updateWeight(streamBatch_start, currentStreamBatch, d_weight_strm);
/* copy the new weights back to host */
CUBLAS_HANDLE_ERROR(cublasGetMatrixAsync(nvisible, currentStreamBatch,
sizeof(float), d_weight_strm, nvisible, h_weight_strm, nvisible, strm[idx_strm]));
}
}
void cublasRunRBM(){
// data
float *h_data_h = (float *)malloc(sizeof(float) * nhidden* nvisible);
float msecTotal = 0.0f;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, NULL));
cublasStatus_t ret;
ret = cublasCreate(&handle);
CUBLAS_HANDLE_ERROR(ret);
deviceMemoryAlloc();
/* initialize streams and events */
strm = (cudaStream_t *)malloc(nStream * sizeof(cudaStream_t));
evt = (cudaEvent_t *)malloc(nStream * sizeof(cudaEvent_t));
for(int j = 0; j < nStream; ++ j){
HANDLE_ERROR(cudaStreamCreate(&strm[j]));
HANDLE_ERROR(cudaEventCreate(&evt[j]));
}
/* main loop over all samples by mini-batch */
for(unsigned i = 0; i < ninst; i += h_miniBatch){
/* copy mini-batch in default stream */
CUBLAS_HANDLE_ERROR(cublasSetStream(handle, NULL));
currentBatch = copyMiniBatchToDevice(i);
/* sync for mini-batch copy */
cudaDeviceSynchronize();
/* calculate V_i_data */
calcVHij(VISIBLE, 0, nvisible);
/* concurrent streams */
for(int j = 0; j < nStream; ++ j){
CUBLAS_HANDLE_ERROR(cublasSetStream(handle, strm[j]));
phase1_TillVisibleRecon(j);
}
/* sync for visible recon matrix by all streams and sum up
return to default NULL stream, implicit sync */
int streamUsed;
if(1.0*nhidden/streamBatch > (nStream - 1))
streamUsed = nStream;
else
streamUsed = (nhidden - 1)/streamBatch + 1;
sumUpVisReco<<<(currentBatch * nvisible)/256 + 1, 256>>>(streamUsed, currentBatch * nvisible, d_data_v_reco);
bias<<<(nvisible - 1)/256 + 1, 256, 256*sizeof(float)>>>(d_data_v_reco, d_a, 0, nvisible, nvisible);
//cudaDeviceSynchronize();
cudaError_t cuda_ret = cudaGetLastError();
HANDLE_ERROR(cuda_ret);
/* calculate V_i_reco */
CUBLAS_HANDLE_ERROR(cublasSetStream(handle, NULL));
calcVHij(VISIBLE_RECO, 0, nvisible);
/* update bias for visible */
updateBias(VISIBLE, 0, nvisible);
cudaDeviceSynchronize();
/* concurrent streams */
for(int j = 0; j < nStream; ++ j){
CUBLAS_HANDLE_ERROR(cublasSetStream(handle, strm[j]));
phase2(j);
}
}
cudaDeviceSynchronize();
cublasDestroy(handle);
//unsigned row = currentBatch;
//unsigned row = 1;
//HANDLE_ERROR(cudaMemcpy(h_data_h, d_weight, sizeof(float)*row*col, cudaMemcpyDeviceToHost));
//printArray(h_data_h, row, col);
//printArray(h_weight, row, col);
//printArray(eigen_data_h, row, col);
//cout << "sqare norm: " << sqn(h_data_h, eigen_data_h, row * col) << endl;
/*
unsigned row = nvisible;
unsigned col = nhidden;
cout << "sqare norm: " << sqn(h_weight, eigen_data_h, row * col) << endl;
*/
HANDLE_ERROR(cudaEventRecord(stop, NULL));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
printf("\tcublas: %.2f msec\n", msecTotal);
deviceMemoryFree();
free(h_data_h);
}
|
34aced90830cd35b9bfbad62272517f58faa0e71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#include "support/common.h"
extern __shared__ int l_mem[];
// GPU kernel ------------------------------------------------------------------------------------------
__global__ void PTTWAC_soa_asta(int A, int B, int b, T *input, int *finished, int *head) {
int* done = l_mem;
int* gid_ = &done[1];
const int tid = threadIdx.x;
int m = A * B - 1;
if(tid == 0) // Dynamic fetch
gid_[0] = atomicAdd(&head[0], 1);
__syncthreads();
while(gid_[0] < m) {
int next_in_cycle = (gid_[0] * A) - m * (gid_[0] / B);
if(next_in_cycle == gid_[0]) {
if(tid == 0) // Dynamic fetch
gid_[0] = atomicAdd(&head[0], 1);
__syncthreads();
continue;
}
T data1, data2, data3, data4;
int i = tid;
if(i < b)
data1 = input[gid_[0] * b + i];
i += blockDim.x;
if(i < b)
data2 = input[gid_[0] * b + i];
i += blockDim.x;
if(i < b)
data3 = input[gid_[0] * b + i];
i += blockDim.x;
if(i < b)
data4 = input[gid_[0] * b + i];
if(tid == 0) {
//make sure the read is not cached
done[0] = atomicAdd(&finished[gid_[0]], 0);
}
__syncthreads();
for(; done[0] == 0; next_in_cycle = (next_in_cycle * A) - m * (next_in_cycle / B)) {
T backup1, backup2, backup3, backup4;
i = tid;
if(i < b)
backup1 = input[next_in_cycle * b + i];
i += blockDim.x;
if(i < b)
backup2 = input[next_in_cycle * b + i];
i += blockDim.x;
if(i < b)
backup3 = input[next_in_cycle * b + i];
i += blockDim.x;
if(i < b)
backup4 = input[next_in_cycle * b + i];
if(tid == 0) {
done[0] = atomicExch(&finished[next_in_cycle], (int)1);
}
__syncthreads();
if(!done[0]) {
i = tid;
if(i < b)
input[next_in_cycle * b + i] = data1;
i += blockDim.x;
if(i < b)
input[next_in_cycle * b + i] = data2;
i += blockDim.x;
if(i < b)
input[next_in_cycle * b + i] = data3;
i += blockDim.x;
if(i < b)
input[next_in_cycle * b + i] = data4;
}
i = tid;
if(i < b)
data1 = backup1;
i += blockDim.x;
if(i < b)
data2 = backup2;
i += blockDim.x;
if(i < b)
data3 = backup3;
i += blockDim.x;
if(i < b)
data4 = backup4;
}
if(tid == 0) // Dynamic fetch
gid_[0] = atomicAdd(&head[0], 1);
__syncthreads();
}
}
hipError_t call_PTTWAC_soa_asta(int blocks, int threads, int A, int B, int b, T *input,
int *finished, int *head, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
hipLaunchKernelGGL(( PTTWAC_soa_asta), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, A, B, b, input,
finished, head);
hipError_t err = hipGetLastError();
return err;
}
__global__ void BS_marshal(T *input, int tile_size, int width) {
T* tile = (T*)l_mem;
int tidx = threadIdx.x;
int m = width*tile_size-1;
int bid = blockIdx.x;
input += tile_size*width*bid;
for (int i = tidx; i < tile_size*width; i+=blockDim.x) {
int next = (i * tile_size)-m*(i/width);
tile[next] = input[i];
}
__syncthreads();
for (int i = tidx; i < tile_size*width; i+=blockDim.x) {
input[i] = tile[i];
}
}
hipError_t call_BS_marshal(int blocks, int threads, int m, int n, T *input, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
hipLaunchKernelGGL(( BS_marshal), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, input, m, n);
hipError_t err = hipGetLastError();
return err;
}
| 34aced90830cd35b9bfbad62272517f58faa0e71.cu | /*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#include "support/common.h"
extern __shared__ int l_mem[];
// GPU kernel ------------------------------------------------------------------------------------------
__global__ void PTTWAC_soa_asta(int A, int B, int b, T *input, int *finished, int *head) {
int* done = l_mem;
int* gid_ = &done[1];
const int tid = threadIdx.x;
int m = A * B - 1;
if(tid == 0) // Dynamic fetch
gid_[0] = atomicAdd(&head[0], 1);
__syncthreads();
while(gid_[0] < m) {
int next_in_cycle = (gid_[0] * A) - m * (gid_[0] / B);
if(next_in_cycle == gid_[0]) {
if(tid == 0) // Dynamic fetch
gid_[0] = atomicAdd(&head[0], 1);
__syncthreads();
continue;
}
T data1, data2, data3, data4;
int i = tid;
if(i < b)
data1 = input[gid_[0] * b + i];
i += blockDim.x;
if(i < b)
data2 = input[gid_[0] * b + i];
i += blockDim.x;
if(i < b)
data3 = input[gid_[0] * b + i];
i += blockDim.x;
if(i < b)
data4 = input[gid_[0] * b + i];
if(tid == 0) {
//make sure the read is not cached
done[0] = atomicAdd(&finished[gid_[0]], 0);
}
__syncthreads();
for(; done[0] == 0; next_in_cycle = (next_in_cycle * A) - m * (next_in_cycle / B)) {
T backup1, backup2, backup3, backup4;
i = tid;
if(i < b)
backup1 = input[next_in_cycle * b + i];
i += blockDim.x;
if(i < b)
backup2 = input[next_in_cycle * b + i];
i += blockDim.x;
if(i < b)
backup3 = input[next_in_cycle * b + i];
i += blockDim.x;
if(i < b)
backup4 = input[next_in_cycle * b + i];
if(tid == 0) {
done[0] = atomicExch(&finished[next_in_cycle], (int)1);
}
__syncthreads();
if(!done[0]) {
i = tid;
if(i < b)
input[next_in_cycle * b + i] = data1;
i += blockDim.x;
if(i < b)
input[next_in_cycle * b + i] = data2;
i += blockDim.x;
if(i < b)
input[next_in_cycle * b + i] = data3;
i += blockDim.x;
if(i < b)
input[next_in_cycle * b + i] = data4;
}
i = tid;
if(i < b)
data1 = backup1;
i += blockDim.x;
if(i < b)
data2 = backup2;
i += blockDim.x;
if(i < b)
data3 = backup3;
i += blockDim.x;
if(i < b)
data4 = backup4;
}
if(tid == 0) // Dynamic fetch
gid_[0] = atomicAdd(&head[0], 1);
__syncthreads();
}
}
cudaError_t call_PTTWAC_soa_asta(int blocks, int threads, int A, int B, int b, T *input,
int *finished, int *head, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
PTTWAC_soa_asta<<<dimGrid, dimBlock, l_mem_size>>>(A, B, b, input,
finished, head);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void BS_marshal(T *input, int tile_size, int width) {
T* tile = (T*)l_mem;
int tidx = threadIdx.x;
int m = width*tile_size-1;
int bid = blockIdx.x;
input += tile_size*width*bid;
for (int i = tidx; i < tile_size*width; i+=blockDim.x) {
int next = (i * tile_size)-m*(i/width);
tile[next] = input[i];
}
__syncthreads();
for (int i = tidx; i < tile_size*width; i+=blockDim.x) {
input[i] = tile[i];
}
}
cudaError_t call_BS_marshal(int blocks, int threads, int m, int n, T *input, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
BS_marshal<<<dimGrid, dimBlock, l_mem_size>>>(input, m, n);
cudaError_t err = cudaGetLastError();
return err;
}
|
074dad2e9fe1ca25c291b69acf089a858b71ab5c.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
// Stringstream is a big hammer, but I want to rely on operator<< for dtype.
#include <sstream>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename in_t, typename out_t>
struct ScaleFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<2>& tl,
float scale)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
in_t* in = (in_t*)tl.addresses[0][tensor_loc];
in += chunk_idx*chunk_size;
out_t* out = (out_t*)tl.addresses[1][tensor_loc];
out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// Non-divergent exit condition for __syncthreads, not necessary here
float incoming_vals[ILP];
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
incoming_vals[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
incoming_vals[ii] = static_cast<float>(in[i]);
}
// note for clarification to future michael:
// From a pure memory dependency perspective, there's likely no point unrolling
// the write loop, since writes just fire off once their LDGs arrive.
// Put another way, the STGs are dependent on the LDGs, but not on each other.
// There is still compute ILP benefit from unrolling the loop though.
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
out[i] = static_cast<out_t>(incoming_vals[ii]*scale);
if(!isfinite(incoming_vals[ii]))
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
}
}
}
}
};
void multi_tensor_scale_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float scale)
{
using namespace at;
// The output (downscaled) type is always float.
// If build times suffer, think about where to put this dispatch,
// and what logic should be moved out of multi_tensor_apply.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tensor_lists[0][0].type(),
"multi_tensor_scale_cuda",
[&]
{
// using accscalar_t = acc_type<scalar_t, true>;
switch(tensor_lists[1][0].scalar_type())
{
case at::ScalarType::Half:
multi_tensor_apply<2>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
ScaleFunctor<scalar_t, at::Half>(),
scale);
break;
case at::ScalarType::Float:
multi_tensor_apply<2>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
ScaleFunctor<scalar_t, float>(),
scale);
break;
default:
std::stringstream ss;
ss << "multi_tensor_scale_cuda not implemented for output type = "
<< tensor_lists[1][0].dtype();
AT_ERROR(ss.str().c_str());
}
});
AT_CUDA_CHECK(hipGetLastError());
// AT_CUDA_CHECK(hipDeviceSynchronize());
}
| 074dad2e9fe1ca25c291b69acf089a858b71ab5c.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
// Stringstream is a big hammer, but I want to rely on operator<< for dtype.
#include <sstream>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename in_t, typename out_t>
struct ScaleFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<2>& tl,
float scale)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
in_t* in = (in_t*)tl.addresses[0][tensor_loc];
in += chunk_idx*chunk_size;
out_t* out = (out_t*)tl.addresses[1][tensor_loc];
out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// Non-divergent exit condition for __syncthreads, not necessary here
float incoming_vals[ILP];
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
incoming_vals[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
incoming_vals[ii] = static_cast<float>(in[i]);
}
// note for clarification to future michael:
// From a pure memory dependency perspective, there's likely no point unrolling
// the write loop, since writes just fire off once their LDGs arrive.
// Put another way, the STGs are dependent on the LDGs, but not on each other.
// There is still compute ILP benefit from unrolling the loop though.
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
out[i] = static_cast<out_t>(incoming_vals[ii]*scale);
if(!isfinite(incoming_vals[ii]))
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
}
}
}
}
};
void multi_tensor_scale_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float scale)
{
using namespace at;
// The output (downscaled) type is always float.
// If build times suffer, think about where to put this dispatch,
// and what logic should be moved out of multi_tensor_apply.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tensor_lists[0][0].type(),
"multi_tensor_scale_cuda",
[&]
{
// using accscalar_t = acc_type<scalar_t, true>;
switch(tensor_lists[1][0].scalar_type())
{
case at::ScalarType::Half:
multi_tensor_apply<2>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
ScaleFunctor<scalar_t, at::Half>(),
scale);
break;
case at::ScalarType::Float:
multi_tensor_apply<2>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
ScaleFunctor<scalar_t, float>(),
scale);
break;
default:
std::stringstream ss;
ss << "multi_tensor_scale_cuda not implemented for output type = "
<< tensor_lists[1][0].dtype();
AT_ERROR(ss.str().c_str());
}
});
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
}
|
2b38510213a2cc73e12253067fd981fc68980f18.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "devarea.hpp"
#include "protocol.hpp"
#include "debug.h"
#include "ptx_stub.h"
__device__ DeviceArea DEVICE_AREA_GLOBAL_NAME;
extern "C" __global__ void INIT_FUNCTION_NAME(DeviceArea device_area)
{
memcpy(&DEVICE_AREA_GLOBAL_NAME, &device_area, sizeof(device_area));
for(int i = 0; i < DEVICE_AREA_GLOBAL_NAME.numq(); ++ i)
{
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(i);
pcheader->read_head = 0;
pcheader->write_head = 0;
pcheader->tail = 0;
DEBUGONLY(printf("PC Buffer %i initialized, at %p, size is: %i\n", i, pcheader, DEVICE_AREA_GLOBAL_NAME.qbuf_size());)
}
__threadfence_system();
}
static __device__ FINLINE unsigned int __ptx_laneid()
{
unsigned int value;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(value));
return value;
}
extern "C" __device__ __attribute__((noinline)) uint64_t GETTID_FUNCTION_NAME(int streamid)
{
return BUILD_ADDRESS(streamid,
(blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z),
((threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x));
}
extern "C" __device__ __attribute__((noinline)) void STORE_OP_FUNCTION_NAME(uint64_t tid, const void* address, int op, int loc_id)
{
const unsigned int active = __ballot(1);
const unsigned int myidx = __ptx_laneid();
const unsigned int ldridx = __ffs(active) - 1;
const int qid = blockIdx.x % DEVICE_AREA_GLOBAL_NAME.numq(); // XXX: change to SM number
const int size = DEVICE_AREA_GLOBAL_NAME.qbuf_size();
int pos = 0;
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(qid);
PCRecord* pcstart = DeviceArea::start(pcheader);
PCRecord* record = NULL;
if(ldridx == myidx)
{
volatile unsigned int* tail = (volatile unsigned int*)&pcheader->tail;
pos = atomicAdd(&pcheader->write_head, 1);
while((pos - *tail) >= size)
__threadfence_system();
}
pos = __shfl(pos, ldridx);
record = pcstart + (pos % size);
DEBUGONLY(printf("bi=%i ti=%i myidx=%i ldridx=%i pos=%i record=%p ra=%p\n", blockIdx.x, threadIdx.x, myidx, ldridx, pos, record, &record->address[myidx]);)
record->address[myidx] = (slimptr_t)(uintptr_t)address;
if(ldridx == myidx)
{
record->tid = tid;
record->active = active;
record->op = (uint16_t)op;
record->loc_id = (uint16_t)loc_id;
while(atomicCAS(&pcheader->read_head, pos, pos + 1) != pos)
__threadfence();
}
__threadfence_system();
}
extern "C" __global__ void force_function_linking(uint64_t* tid)
{
*tid = GETTID_FUNCTION_NAME(0x1234);
STORE_OP_FUNCTION_NAME(*tid, NULL, OP_SYNCTHREADS, 1);
}
int main (int argc, char* argv[])
{
uint64_t* x;
if(0 != hipMalloc(&x, sizeof(uint64_t)))
{
printf("Failed hipMalloc().\n");
return 1;
}
void* buf;
int buf_size = 64 * 1000;
if(0 != hipMalloc(&buf, buf_size))
{
printf("Failed hipMalloc().\n");
return 1;
}
DeviceArea devarea(buf, buf_size, 2);
hipLaunchKernelGGL(( INIT_FUNCTION_NAME), dim3(1),dim3(1), 0, 0, devarea);
int sync = hipDeviceSynchronize();
if(sync != 0)
{
printf("%s failed, err=%i\n", NAMEOF_INIT_FUNCTION_NAME, sync);
return 2;
}
hipLaunchKernelGGL(( force_function_linking), dim3(1),dim3(1), 0, 0, x);
sync = hipDeviceSynchronize();
if(sync != 0)
{
printf("Link function failed, err=%i\n", sync);
return 2;
}
printf("PTX stubs tested OK!\n");
return 0;
}
| 2b38510213a2cc73e12253067fd981fc68980f18.cu | #include <cuda_runtime.h>
#include "devarea.hpp"
#include "protocol.hpp"
#include "debug.h"
#include "ptx_stub.h"
__device__ DeviceArea DEVICE_AREA_GLOBAL_NAME;
extern "C" __global__ void INIT_FUNCTION_NAME(DeviceArea device_area)
{
memcpy(&DEVICE_AREA_GLOBAL_NAME, &device_area, sizeof(device_area));
for(int i = 0; i < DEVICE_AREA_GLOBAL_NAME.numq(); ++ i)
{
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(i);
pcheader->read_head = 0;
pcheader->write_head = 0;
pcheader->tail = 0;
DEBUGONLY(printf("PC Buffer %i initialized, at %p, size is: %i\n", i, pcheader, DEVICE_AREA_GLOBAL_NAME.qbuf_size());)
}
__threadfence_system();
}
static __device__ FINLINE unsigned int __ptx_laneid()
{
unsigned int value;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(value));
return value;
}
extern "C" __device__ __attribute__((noinline)) uint64_t GETTID_FUNCTION_NAME(int streamid)
{
return BUILD_ADDRESS(streamid,
(blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z),
((threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x));
}
extern "C" __device__ __attribute__((noinline)) void STORE_OP_FUNCTION_NAME(uint64_t tid, const void* address, int op, int loc_id)
{
const unsigned int active = __ballot(1);
const unsigned int myidx = __ptx_laneid();
const unsigned int ldridx = __ffs(active) - 1;
const int qid = blockIdx.x % DEVICE_AREA_GLOBAL_NAME.numq(); // XXX: change to SM number
const int size = DEVICE_AREA_GLOBAL_NAME.qbuf_size();
int pos = 0;
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(qid);
PCRecord* pcstart = DeviceArea::start(pcheader);
PCRecord* record = NULL;
if(ldridx == myidx)
{
volatile unsigned int* tail = (volatile unsigned int*)&pcheader->tail;
pos = atomicAdd(&pcheader->write_head, 1);
while((pos - *tail) >= size)
__threadfence_system();
}
pos = __shfl(pos, ldridx);
record = pcstart + (pos % size);
DEBUGONLY(printf("bi=%i ti=%i myidx=%i ldridx=%i pos=%i record=%p ra=%p\n", blockIdx.x, threadIdx.x, myidx, ldridx, pos, record, &record->address[myidx]);)
record->address[myidx] = (slimptr_t)(uintptr_t)address;
if(ldridx == myidx)
{
record->tid = tid;
record->active = active;
record->op = (uint16_t)op;
record->loc_id = (uint16_t)loc_id;
while(atomicCAS(&pcheader->read_head, pos, pos + 1) != pos)
__threadfence();
}
__threadfence_system();
}
extern "C" __global__ void force_function_linking(uint64_t* tid)
{
*tid = GETTID_FUNCTION_NAME(0x1234);
STORE_OP_FUNCTION_NAME(*tid, NULL, OP_SYNCTHREADS, 1);
}
int main (int argc, char* argv[])
{
uint64_t* x;
if(0 != cudaMalloc(&x, sizeof(uint64_t)))
{
printf("Failed cudaMalloc().\n");
return 1;
}
void* buf;
int buf_size = 64 * 1000;
if(0 != cudaMalloc(&buf, buf_size))
{
printf("Failed cudaMalloc().\n");
return 1;
}
DeviceArea devarea(buf, buf_size, 2);
INIT_FUNCTION_NAME<<<1,1>>>(devarea);
int sync = cudaDeviceSynchronize();
if(sync != 0)
{
printf("%s failed, err=%i\n", NAMEOF_INIT_FUNCTION_NAME, sync);
return 2;
}
force_function_linking<<<1,1>>>(x);
sync = cudaDeviceSynchronize();
if(sync != 0)
{
printf("Link function failed, err=%i\n", sync);
return 2;
}
printf("PTX stubs tested OK!\n");
return 0;
}
|
106da97973618be786bd28bd1386ca3c3ed2fa62.hip | // !!! This is a file automatically generated by hipify!!!
// This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::bfloat16_t, true);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::bfloat16_t, true);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::bfloat16_t, true);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::bfloat16_t, true);
| 106da97973618be786bd28bd1386ca3c3ed2fa62.cu | // This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::bfloat16_t, true);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::bfloat16_t, true);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::bfloat16_t, true);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::bfloat16_t, true);
|
959500571fc928c53fea2ebb1610f19493ec3042.hip | // !!! This is a file automatically generated by hipify!!!
/*
* extract_minimums.cu
*
* Created on: Mar 8, 2015
* Author: nicolas.legroux
*/
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "extract_minimums.h"
#include "../utilities.h"
#define BLOCK_DIM_X 512
#define MAX_DOUBLE 10000000000000.0;
__global__ void set_maximum_double_value(double * data, int * indexes_to_reset, int n_train, int n_test){
unsigned int thread_global_idx = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_global_idx < n_test){
int index_to_reset = indexes_to_reset[thread_global_idx];
data[n_train * thread_global_idx + index_to_reset] = MAX_DOUBLE;
}
}
__global__ void find_minimum(double * data, int * indexes, int * output_indexes,
int n_train, int n_test) {
__shared__ double block_data[BLOCK_DIM_X];
__shared__ int block_indexes[BLOCK_DIM_X];
unsigned int thread_block_idx = threadIdx.x;
unsigned int thread_global_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int thread_global_idy = blockIdx.y * blockDim.y ;
if (thread_global_idx < n_train) {
block_data[thread_block_idx] = data[thread_global_idy * n_train + thread_global_idx];
block_indexes[thread_block_idx] = indexes[thread_global_idy * n_train + thread_global_idx];
} else {
block_data[thread_block_idx] = MAX_DOUBLE
block_indexes[thread_block_idx] = -1;
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (thread_block_idx % (2 * s) == 0) {
if (block_data[thread_block_idx + s]
< block_data[thread_block_idx]) {
block_data[thread_block_idx] = block_data[thread_block_idx + s];
block_indexes[thread_block_idx] = block_indexes[thread_block_idx
+ s];
}
}
}
if (thread_block_idx == 0) {
output_indexes[thread_global_idy * gridDim.x + blockIdx.x] = block_indexes[0];
}
__syncthreads();
}
void find_k_minimums(double * data, int n_train, int n_test, int k, int * k_minimum_indexes) {
double* d_data;
int* d_indexes;
int* d_indexes_to_reset;
int* output_indexes;
int* host_indexes_cpy;
int data_size = n_train * n_test * sizeof(double);
int indexes_size = n_train * n_test * sizeof(int);
int * indexes = (int*) malloc(n_train * n_test * sizeof(int));
for(int i=0; i<n_test; i++){
for(int j=0; j < n_train; j++){
indexes[i*n_train+j] = j;
}
}
checkCudaErrors(hipMalloc((void**) &d_data, data_size));
checkCudaErrors(
hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**) &d_indexes, indexes_size));
checkCudaErrors(
hipMemcpy(d_indexes, indexes, indexes_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**) &d_indexes_to_reset, n_test*sizeof(int)));
int dim_grid_y = n_test;
int dim_block_y = 1;
int dim_block_x = BLOCK_DIM_X;
int dim_grid_x = n_train / dim_block_x;
if (n_train % BLOCK_DIM_X != 0) {
dim_grid_x++;
}
dim3 dim_grid(dim_grid_x, dim_grid_y);
dim3 dim_block(dim_block_x, dim_block_y);
checkCudaErrors(hipMalloc((void**) &output_indexes, dim_grid.x * n_test * sizeof(int)));
host_indexes_cpy = (int*) malloc(dim_grid.x * n_test * sizeof(int));
int * index_minimums = (int*) malloc(n_test * sizeof(int));
int dim_block_x_reset = BLOCK_DIM_X/4;
int dim_grid_x_reset = n_test / dim_block_x_reset;
if(n_test % dim_block_x_reset != 0){
dim_grid_x_reset++;
}
for(int j=0; j<k; j++){
hipLaunchKernelGGL(( find_minimum), dim3(dim_grid), dim3(dim_block), 0, 0, d_data, d_indexes, output_indexes, n_train, n_test);
checkCudaErrors(hipMemcpy(host_indexes_cpy, output_indexes, dim_grid.x* n_test * sizeof(int), hipMemcpyDeviceToHost));
for(int i=0; i<n_test; i++){
index_minimums[i] = find_minimum_cpu((data+i*n_train), (host_indexes_cpy + i*dim_grid.x), dim_grid.x);
k_minimum_indexes[i*k+j] = index_minimums[i];
}
checkCudaErrors(
hipMemcpy(d_indexes_to_reset, index_minimums, n_test*sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( set_maximum_double_value), dim3(dim_grid_x_reset), dim3(dim_block_x_reset), 0, 0, d_data, d_indexes_to_reset, n_train, n_test);
hipError_t err = hipDeviceSynchronize();
if(err != hipSuccess){
printf("hipDeviceSynchronize error: %s\n", hipGetErrorString(err));
exit(1);
}
}
free(host_indexes_cpy);
free(index_minimums);
checkCudaErrors(hipFree(d_data));
checkCudaErrors(hipFree(d_indexes));
checkCudaErrors(hipFree(output_indexes));
checkCudaErrors(hipFree(d_indexes_to_reset));
}
int find_minimum_cpu(double * data, int * indexes, int n){
int index_min = indexes[0];
double min_value = data[index_min];
for(int i=1; i< n; i++){
if(data[indexes[i]] < min_value){
min_value = data[indexes[i]];
index_min = indexes[i];
}
}
return index_min;
}
void test_extract_minimum(int n_train, int n_test, int k){
double * data = new double[n_train * n_test];
int * k_minimum_indexes = (int*) malloc(k*n_test*sizeof(int));
array_fill(data, n_train * n_test);
//print_vectors_in_row_major_order(data, n_test, n_train);
find_k_minimums(data, n_train, n_test, k, k_minimum_indexes);
//print_vectors_in_row_major_order(k_minimum_indexes, n_test, k);
delete[] data;
delete[] k_minimum_indexes;
}
| 959500571fc928c53fea2ebb1610f19493ec3042.cu | /*
* extract_minimums.cu
*
* Created on: Mar 8, 2015
* Author: nicolas.legroux
*/
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "extract_minimums.h"
#include "../utilities.h"
#define BLOCK_DIM_X 512
#define MAX_DOUBLE 10000000000000.0;
__global__ void set_maximum_double_value(double * data, int * indexes_to_reset, int n_train, int n_test){
unsigned int thread_global_idx = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_global_idx < n_test){
int index_to_reset = indexes_to_reset[thread_global_idx];
data[n_train * thread_global_idx + index_to_reset] = MAX_DOUBLE;
}
}
__global__ void find_minimum(double * data, int * indexes, int * output_indexes,
int n_train, int n_test) {
__shared__ double block_data[BLOCK_DIM_X];
__shared__ int block_indexes[BLOCK_DIM_X];
unsigned int thread_block_idx = threadIdx.x;
unsigned int thread_global_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int thread_global_idy = blockIdx.y * blockDim.y ;
if (thread_global_idx < n_train) {
block_data[thread_block_idx] = data[thread_global_idy * n_train + thread_global_idx];
block_indexes[thread_block_idx] = indexes[thread_global_idy * n_train + thread_global_idx];
} else {
block_data[thread_block_idx] = MAX_DOUBLE
block_indexes[thread_block_idx] = -1;
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (thread_block_idx % (2 * s) == 0) {
if (block_data[thread_block_idx + s]
< block_data[thread_block_idx]) {
block_data[thread_block_idx] = block_data[thread_block_idx + s];
block_indexes[thread_block_idx] = block_indexes[thread_block_idx
+ s];
}
}
}
if (thread_block_idx == 0) {
output_indexes[thread_global_idy * gridDim.x + blockIdx.x] = block_indexes[0];
}
__syncthreads();
}
void find_k_minimums(double * data, int n_train, int n_test, int k, int * k_minimum_indexes) {
double* d_data;
int* d_indexes;
int* d_indexes_to_reset;
int* output_indexes;
int* host_indexes_cpy;
int data_size = n_train * n_test * sizeof(double);
int indexes_size = n_train * n_test * sizeof(int);
int * indexes = (int*) malloc(n_train * n_test * sizeof(int));
for(int i=0; i<n_test; i++){
for(int j=0; j < n_train; j++){
indexes[i*n_train+j] = j;
}
}
checkCudaErrors(cudaMalloc((void**) &d_data, data_size));
checkCudaErrors(
cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**) &d_indexes, indexes_size));
checkCudaErrors(
cudaMemcpy(d_indexes, indexes, indexes_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**) &d_indexes_to_reset, n_test*sizeof(int)));
int dim_grid_y = n_test;
int dim_block_y = 1;
int dim_block_x = BLOCK_DIM_X;
int dim_grid_x = n_train / dim_block_x;
if (n_train % BLOCK_DIM_X != 0) {
dim_grid_x++;
}
dim3 dim_grid(dim_grid_x, dim_grid_y);
dim3 dim_block(dim_block_x, dim_block_y);
checkCudaErrors(cudaMalloc((void**) &output_indexes, dim_grid.x * n_test * sizeof(int)));
host_indexes_cpy = (int*) malloc(dim_grid.x * n_test * sizeof(int));
int * index_minimums = (int*) malloc(n_test * sizeof(int));
int dim_block_x_reset = BLOCK_DIM_X/4;
int dim_grid_x_reset = n_test / dim_block_x_reset;
if(n_test % dim_block_x_reset != 0){
dim_grid_x_reset++;
}
for(int j=0; j<k; j++){
find_minimum<<<dim_grid, dim_block>>>(d_data, d_indexes, output_indexes, n_train, n_test);
checkCudaErrors(cudaMemcpy(host_indexes_cpy, output_indexes, dim_grid.x* n_test * sizeof(int), cudaMemcpyDeviceToHost));
for(int i=0; i<n_test; i++){
index_minimums[i] = find_minimum_cpu((data+i*n_train), (host_indexes_cpy + i*dim_grid.x), dim_grid.x);
k_minimum_indexes[i*k+j] = index_minimums[i];
}
checkCudaErrors(
cudaMemcpy(d_indexes_to_reset, index_minimums, n_test*sizeof(int), cudaMemcpyHostToDevice));
set_maximum_double_value<<<dim_grid_x_reset, dim_block_x_reset>>>(d_data, d_indexes_to_reset, n_train, n_test);
cudaError_t err = cudaThreadSynchronize();
if(err != cudaSuccess){
printf("cudaThreadSynchronize error: %s\n", cudaGetErrorString(err));
exit(1);
}
}
free(host_indexes_cpy);
free(index_minimums);
checkCudaErrors(cudaFree(d_data));
checkCudaErrors(cudaFree(d_indexes));
checkCudaErrors(cudaFree(output_indexes));
checkCudaErrors(cudaFree(d_indexes_to_reset));
}
int find_minimum_cpu(double * data, int * indexes, int n){
int index_min = indexes[0];
double min_value = data[index_min];
for(int i=1; i< n; i++){
if(data[indexes[i]] < min_value){
min_value = data[indexes[i]];
index_min = indexes[i];
}
}
return index_min;
}
void test_extract_minimum(int n_train, int n_test, int k){
double * data = new double[n_train * n_test];
int * k_minimum_indexes = (int*) malloc(k*n_test*sizeof(int));
array_fill(data, n_train * n_test);
//print_vectors_in_row_major_order(data, n_test, n_train);
find_k_minimums(data, n_train, n_test, k, k_minimum_indexes);
//print_vectors_in_row_major_order(k_minimum_indexes, n_test, k);
delete[] data;
delete[] k_minimum_indexes;
}
|
afba48048e2e897f78a6f18cca63977c4862fc86.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SyncCells.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *i_cells = NULL;
hipMalloc(&i_cells, XSIZE*YSIZE);
int *o_cells = NULL;
hipMalloc(&o_cells, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
SyncCells), dim3(gridBlock),dim3(threadBlock), 0, 0, i_cells,o_cells,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
SyncCells), dim3(gridBlock),dim3(threadBlock), 0, 0, i_cells,o_cells,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
SyncCells), dim3(gridBlock),dim3(threadBlock), 0, 0, i_cells,o_cells,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | afba48048e2e897f78a6f18cca63977c4862fc86.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SyncCells.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *i_cells = NULL;
cudaMalloc(&i_cells, XSIZE*YSIZE);
int *o_cells = NULL;
cudaMalloc(&o_cells, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
SyncCells<<<gridBlock,threadBlock>>>(i_cells,o_cells,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
SyncCells<<<gridBlock,threadBlock>>>(i_cells,o_cells,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
SyncCells<<<gridBlock,threadBlock>>>(i_cells,o_cells,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b3cc4ae9716a9618111163866601baa4ef0bf6ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RayTracing.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "SphereCreator.h"
#include "length_cm.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __host__ void uploadGPU(Sphere* tabSphere);
extern __global__ void rayTracingGM(Sphere* ptrDevTabSphere, uchar4* ptrDevPixels, uint w, uint h, float t);
extern __global__ void rayTracingCM(uchar4* ptrDevPixels, uint w, uint h, float t);
extern __global__ void rayTracingSM(Sphere* ptrDevTabSphere, uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
RayTracing::RayTracing(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_RGBA_uchar4")
{
// Inputs
this->dt = dt;
// Tools
this->t = 0; // protected dans Animable
SphereCreator sphereCreator(LENGTH_CM, w, h);
Sphere* ptrTabSphere = sphereCreator.getTabSphere();
uploadGPU(ptrTabSphere);
this->sizeOctetSpheres = LENGTH_CM * sizeof(Sphere);
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrDevTabSphere, sizeOctetSpheres);
Device::memclear(ptrDevTabSphere, sizeOctetSpheres);
}
// MM (copy Host->Device)
{
Device::memcpyHToD(ptrDevTabSphere, ptrTabSphere, sizeOctetSpheres);
}
}
}
RayTracing::~RayTracing()
{
Device::free(ptrDevTabSphere);
// rien
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car pas zoomable
*/
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
/*
static int i = 0;
if (i % 3 == 0)
{
rayTracingGM<<<dg,db>>>(this->ptrDevTabSphere, ptrDevPixels, w, h, t);
}
else if (i % 3 == 1)
{
rayTracingCM<<<dg,db>>>(ptrDevPixels, w, h, t);
}
else if(i % 3 == 2)
{
rayTracingSM<<<dg,db, this->sizeOctetSpheres>>>(this->ptrDevTabSphere, ptrDevPixels, w, h, t);
}
i++;*/
int mp=Device::getMPCount();
int coreMP=Device::getCoreCountMP();
dim3 dg = dim3(mp, 2, 1); // disons, a optimiser selon le gpu, peut drastiqument ameliorer ou baisser les performances
dim3 db = dim3(coreMP, 2, 1);
hipLaunchKernelGGL(( rayTracingGM), dim3(dg),dim3(db), 0, 0, this->ptrDevTabSphere, ptrDevPixels, w, h, t);
//rayTracingCM<<<dg,db>>>(ptrDevPixels, w, h, t);
//rayTracingSM<<<dg,db, this->sizeOctetSpheres>>>(this->ptrDevTabSphere, ptrDevPixels, w, h, t);
}
/**
* Override
* Call periodicly by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| b3cc4ae9716a9618111163866601baa4ef0bf6ca.cu | #include "RayTracing.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "SphereCreator.h"
#include "length_cm.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __host__ void uploadGPU(Sphere* tabSphere);
extern __global__ void rayTracingGM(Sphere* ptrDevTabSphere, uchar4* ptrDevPixels, uint w, uint h, float t);
extern __global__ void rayTracingCM(uchar4* ptrDevPixels, uint w, uint h, float t);
extern __global__ void rayTracingSM(Sphere* ptrDevTabSphere, uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
RayTracing::RayTracing(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_RGBA_uchar4")
{
// Inputs
this->dt = dt;
// Tools
this->t = 0; // protected dans Animable
SphereCreator sphereCreator(LENGTH_CM, w, h);
Sphere* ptrTabSphere = sphereCreator.getTabSphere();
uploadGPU(ptrTabSphere);
this->sizeOctetSpheres = LENGTH_CM * sizeof(Sphere);
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrDevTabSphere, sizeOctetSpheres);
Device::memclear(ptrDevTabSphere, sizeOctetSpheres);
}
// MM (copy Host->Device)
{
Device::memcpyHToD(ptrDevTabSphere, ptrTabSphere, sizeOctetSpheres);
}
}
}
RayTracing::~RayTracing()
{
Device::free(ptrDevTabSphere);
// rien
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car pas zoomable
*/
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
/*
static int i = 0;
if (i % 3 == 0)
{
rayTracingGM<<<dg,db>>>(this->ptrDevTabSphere, ptrDevPixels, w, h, t);
}
else if (i % 3 == 1)
{
rayTracingCM<<<dg,db>>>(ptrDevPixels, w, h, t);
}
else if(i % 3 == 2)
{
rayTracingSM<<<dg,db, this->sizeOctetSpheres>>>(this->ptrDevTabSphere, ptrDevPixels, w, h, t);
}
i++;*/
int mp=Device::getMPCount();
int coreMP=Device::getCoreCountMP();
dim3 dg = dim3(mp, 2, 1); // disons, a optimiser selon le gpu, peut drastiqument ameliorer ou baisser les performances
dim3 db = dim3(coreMP, 2, 1);
rayTracingGM<<<dg,db>>>(this->ptrDevTabSphere, ptrDevPixels, w, h, t);
//rayTracingCM<<<dg,db>>>(ptrDevPixels, w, h, t);
//rayTracingSM<<<dg,db, this->sizeOctetSpheres>>>(this->ptrDevTabSphere, ptrDevPixels, w, h, t);
}
/**
* Override
* Call periodicly by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
f9e9cb6ac9513c97be0d31e1d8036592b6f316d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
extern "C" {
__global__
void find_roots(int N, int chunk, int* parents) {
int jump = N/chunk;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
bool flag = true;
while (flag) {
flag = false;
for (int i=0; i<chunk; ++i) {
if (parents[x + i*jump] != parents[parents[x+ i*jump]]) {
parents[x + i*jump] = parents[parents[x+ i*jump]];
flag = true;
}
}
}
}
}
| f9e9cb6ac9513c97be0d31e1d8036592b6f316d9.cu | #include <cstdio>
extern "C" {
__global__
void find_roots(int N, int chunk, int* parents) {
int jump = N/chunk;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
bool flag = true;
while (flag) {
flag = false;
for (int i=0; i<chunk; ++i) {
if (parents[x + i*jump] != parents[parents[x+ i*jump]]) {
parents[x + i*jump] = parents[parents[x+ i*jump]];
flag = true;
}
}
}
}
}
|
3255d7834fc096b3c783bbe10a2f00af1f30b83c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlascl2.cu, normal z -> s, Sun Nov 20 20:20:29 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
// Scales row `ind` of the full m x n matrix A by D[ind].
// One NB-thread block per NB rows; each thread handles one row, moving
// left to right across all n columns.
__global__ void
slascl2_full(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    if (ind < m) {
        // Load the scale factor only for valid rows: in the last block,
        // threads with ind >= m would otherwise read D out of bounds
        // (D has dimension m).
        float mul = D[ind];
        A += ind;
        for (int j=0; j < n; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Scales row `ind` of the lower-triangular m x n matrix A by D[ind].
// One NB-thread block per NB rows; each thread handles one row, moving
// left to right up to (and including) the diagonal column.
__global__ void
slascl2_lower(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    if (ind < m) {
        // Last column to touch: the diagonal, clamped to n-1 for rows
        // below the square part.
        int break_d = (ind < n) ? ind : n-1;
        // Load the scale factor only for valid rows: in the last block,
        // threads with ind >= m would otherwise read D out of bounds
        // (D has dimension m).
        float mul = D[ind];
        A += ind;
        for (int j=0; j <= break_d; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Scales row `ind` of the upper-triangular m x n matrix A by D[ind].
// One NB-thread block per NB rows; each thread handles one row, moving
// from the right edge left down to the diagonal column.
__global__ void
slascl2_upper(int m, int n, const float *D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    if (ind < m) {
        // Load the scale factor only for valid rows: in the last block,
        // threads with ind >= m would otherwise read D out of bounds
        // (D has dimension m).
        float mul = D[ind];
        A += ind;
        for (int j=n-1; j >= ind; j--)
            A[j*lda] *= mul;
    }
}
/***************************************************************************//**
Purpose
-------
SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_slascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_slascl2(
    magma_type_t type, magma_int_t m, magma_int_t n,
    magmaFloat_const_ptr dD,
    magmaFloat_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    // Validate arguments LAPACK-style: *info gets the negated index of the
    // first invalid argument, and magma_xerbla reports it.
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
        *info = -1;
    else if ( m < 0 )
        *info = -2;
    else if ( n < 0 )
        *info = -3;
    else if ( ldda < max(1,m) )
        *info = -5;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //info;
    }
    // One thread per row: NB-thread blocks tile the m rows.
    dim3 grid( magma_ceildiv( m, NB ) );
    dim3 threads( NB );
    // Dispatch on storage type; each kernel scales row i of dA by dD[i]
    // over the column range implied by the layout.
    if (type == MagmaLower) {
        hipLaunchKernelGGL(( slascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
    }
    else if (type == MagmaUpper) {
        hipLaunchKernelGGL(( slascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
    }
    else if (type == MagmaFull) {
        hipLaunchKernelGGL(( slascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
    }
}
| 3255d7834fc096b3c783bbe10a2f00af1f30b83c.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlascl2.cu, normal z -> s, Sun Nov 20 20:20:29 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
// Scales row `ind` of the full m x n matrix A by D[ind].
// One NB-thread block per NB rows; each thread handles one row, moving
// left to right across all n columns.
__global__ void
slascl2_full(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    if (ind < m) {
        // Load the scale factor only for valid rows: in the last block,
        // threads with ind >= m would otherwise read D out of bounds
        // (D has dimension m).
        float mul = D[ind];
        A += ind;
        for (int j=0; j < n; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Scales row `ind` of the lower-triangular m x n matrix A by D[ind].
// One NB-thread block per NB rows; each thread handles one row, moving
// left to right up to (and including) the diagonal column.
__global__ void
slascl2_lower(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    if (ind < m) {
        // Last column to touch: the diagonal, clamped to n-1 for rows
        // below the square part.
        int break_d = (ind < n) ? ind : n-1;
        // Load the scale factor only for valid rows: in the last block,
        // threads with ind >= m would otherwise read D out of bounds
        // (D has dimension m).
        float mul = D[ind];
        A += ind;
        for (int j=0; j <= break_d; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Scales row `ind` of the upper-triangular m x n matrix A by D[ind].
// One NB-thread block per NB rows; each thread handles one row, moving
// from the right edge left down to the diagonal column.
__global__ void
slascl2_upper(int m, int n, const float *D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    if (ind < m) {
        // Load the scale factor only for valid rows: in the last block,
        // threads with ind >= m would otherwise read D out of bounds
        // (D has dimension m).
        float mul = D[ind];
        A += ind;
        for (int j=n-1; j >= ind; j--)
            A[j*lda] *= mul;
    }
}
/***************************************************************************//**
Purpose
-------
SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_slascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_slascl2(
    magma_type_t type, magma_int_t m, magma_int_t n,
    magmaFloat_const_ptr dD,
    magmaFloat_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    // Validate arguments LAPACK-style: *info gets the negated index of the
    // first invalid argument, and magma_xerbla reports it.
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
        *info = -1;
    else if ( m < 0 )
        *info = -2;
    else if ( n < 0 )
        *info = -3;
    else if ( ldda < max(1,m) )
        *info = -5;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //info;
    }
    // One thread per row: NB-thread blocks tile the m rows.
    dim3 grid( magma_ceildiv( m, NB ) );
    dim3 threads( NB );
    // Dispatch on storage type; each kernel scales row i of dA by dD[i]
    // over the column range implied by the layout.
    if (type == MagmaLower) {
        slascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
    }
    else if (type == MagmaUpper) {
        slascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
    }
    else if (type == MagmaFull) {
        slascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
    }
}
|
bf773f4b816d398601b1354c49c4eb798369f3aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/local/cuda-convnet2/img_acts.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* * This file has been modified by Megvii ("Megvii Modifications").
* * All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
* --------------------------------------------------------------------------
*/
#include "cudaconv2.cuh"
#include "nvmatrix.cuh"
#include "img_acts/img_act_templates.cuh"
#ifdef _WIN32
#define _Pragma(x)
#endif
namespace megdnn {
namespace cuda {
/*
* New Titan-optimized stuff.
*/
// Given a module (my, mx), computes its flat index and the position of the
// block's target pixel inside that module's filter window. Both results are
// returned through the reference out-parameters.
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX,
        const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) {
    // Flat module index (row-major over the module grid).
    moduleIdx = my * numModulesX + mx;
    // Offset of the target pixel from the module's top-left corner.
    const int pxInFilterY = blockPixelIdxY - (paddingStart + my * moduleStride);
    const int pxInFilterX = blockPixelIdxX - (paddingStart + mx * moduleStride);
    pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
}
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
/*
* Same loop as above but inverted.
*/
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
_Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}
/*
 * Image-gradient ("img acts") kernel, texture-fetch variant, specialized for
 * B_Y=8 x B_X=32 thread blocks with filterCacheF=32 / filterCacheH=16
 * software-pipelined preloading (see kernel name).
 *
 * Each block computes one target image pixel (blockIdx.y) for a tile of
 * B_X*imgsPerThread images and B_Y*colorsPerThread input colors, both
 * encoded in blockIdx.x. hidActs and filters are read via tex1Dfetch,
 * accumulated in registers (prod), and written to `targets`, optionally
 * blended with the existing contents when `scale` is set.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2)   // 256 threads per block, 2 blocks per multiprocessor
                            // These launch bounds ensure 25% occupancy (128 registers used)
                            // as oppposed to 13% (130 registers) achieved by defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
                                          const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                          const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                          const int numImgColors, const int numGroups,
                                          const float scaleTargets, const float scaleOutputs) {
    // Shared tiles: a colorsPerThread*B_Y x filterCacheF slab of filter
    // weights and a filterCacheH x (B_X*imgsPerThread) slab of hidden acts.
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
    // Zero-fill the shared tiles (slots skipped by the bounds-checked
    // loaders below would otherwise be read uninitialized).
    fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
    fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
    __syncthreads();
    // Decode blockIdx.x into an image tile and a color tile.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    // This block's target pixel, taken from blockIdx.y.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;
    // Base texel offsets for this thread's texture fetches.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
    // Per-thread register accumulators: colorsPerThread x imgsPerThread.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this pixel, derived from
    // padding, stride, and filter size.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;
    /*
     * Initial preload
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]
    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                         blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
    // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
    //                           : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }
    // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
    int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }
    // Main loop: walk every module overlapping this pixel; within each,
    // process filterCacheF filters per iteration, interleaving the next
    // tile's texture fetches (wPreload/hPreload) with the current tile's
    // math via the IA_PRELOAD_* macros.
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;
        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;
            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
            // Coordinates of the *next* module, so its loads can begin
            // before this module's math finishes.
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                                 blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                __syncthreads();
                // First half of the filter tile: compute while refetching
                // the next wPreload / hPreload values.
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_W_TX(z);
                }
                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_H_TX((z-4)/4,z%4);
                }
                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                }
                __syncthreads();
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                __syncthreads();
                // Second half of the filter tile (offset filterCacheH).
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_W_TX(z+4);
                }
                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_H_TX((z-4)/4, z%4);
                }
                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                }
                __syncthreads();
            }
        }
    }
    // Epilogue: write the accumulators out, either blended with the
    // existing targets (scale) or overwriting them.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
 * Image-gradient ("img acts") kernel, texture-fetch variant, specialized for
 * B_Y=4 x B_X=32 thread blocks with filterCacheF=16 / filterCacheH=16
 * preloading (see kernel name). Unlike the ty_8 variant above, all compute
 * is issued first and all refetches after each barrier — per the original
 * author's note, the hardware scheduler overlaps them anyway.
 *
 * Each block computes one target image pixel (blockIdx.y) for a tile of
 * B_X*imgsPerThread images and B_Y*colorsPerThread input colors, both
 * encoded in blockIdx.x. hidActs and filters are read via tex1Dfetch,
 * accumulated in registers (prod), and written to `targets`, optionally
 * blended with the existing contents when `scale` is set.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3)   // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
                                          const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                          const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                          const int numImgColors, const int numGroups,
                                          const float scaleTargets, const float scaleOutputs) {
    // Shared tiles: a colorsPerThread*B_Y x filterCacheF slab of filter
    // weights and a filterCacheH x (B_X*imgsPerThread) slab of hidden acts.
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
    // Zero-fill the shared tiles (slots skipped by the bounds-checked
    // loaders below would otherwise be read uninitialized).
    fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
    fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
    __syncthreads();
    // Decode blockIdx.x into an image tile and a color tile.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    // This block's target pixel, taken from blockIdx.y.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;
    // Base texel offsets for this thread's texture fetches.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
    // Per-thread register accumulators: colorsPerThread x imgsPerThread.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this pixel, derived from
    // padding, stride, and filter size.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;
    /*
     * Initial preload
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]
    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                         blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
    // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
    //                           : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }
    // const float* hLoad = &hidActs[moduleIdx * numImages];
    int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }
    // Main loop: walk every module overlapping this pixel; within each,
    // process filterCacheF filters per iteration.
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;
        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;
            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
            // Coordinates of the *next* module, so its loads can begin
            // before this module's math finishes.
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                                 blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }
                __syncthreads();
                // It seems that there is no point explicitly interleaving loads
                // and computations because the scheduler does that anyway.
                IA_PRELOAD_LOOP2(0,0);
                IA_PRELOAD_LOOP2(1,0);
                IA_PRELOAD_LOOP2(2,0);
                IA_PRELOAD_LOOP2(3,0);
                IA_PRELOAD_LOOP2(4,0);
                IA_PRELOAD_LOOP2(5,0);
                IA_PRELOAD_LOOP2(6,0);
                IA_PRELOAD_LOOP2(7,0);
                IA_PRELOAD_LOOP2(8,0);
                IA_PRELOAD_LOOP2(9,0);
                IA_PRELOAD_LOOP2(10,0);
                IA_PRELOAD_LOOP2(11,0);
                IA_PRELOAD_LOOP2(12,0);
                IA_PRELOAD_LOOP2(13,0);
                IA_PRELOAD_LOOP2(14,0);
                IA_PRELOAD_LOOP2(15,0);
                IA_PRELOAD_W_TX(0);
                IA_PRELOAD_W_TX(1);
                IA_PRELOAD_W_TX(2);
                IA_PRELOAD_W_TX(3);
                IA_PRELOAD_W_TX(4);
                IA_PRELOAD_W_TX(5);
                IA_PRELOAD_H_TX(0,0);
                IA_PRELOAD_H_TX(0,1);
                IA_PRELOAD_H_TX(0,2);
                IA_PRELOAD_H_TX(0,3);
                IA_PRELOAD_H_TX(1,0);
                IA_PRELOAD_H_TX(1,1);
                IA_PRELOAD_H_TX(1,2);
                IA_PRELOAD_H_TX(1,3);
                IA_PRELOAD_H_TX(2,0);
                IA_PRELOAD_H_TX(2,1);
                IA_PRELOAD_H_TX(2,2);
                IA_PRELOAD_H_TX(2,3);
                IA_PRELOAD_H_TX(3,0);
                IA_PRELOAD_H_TX(3,1);
                IA_PRELOAD_H_TX(3,2);
                IA_PRELOAD_H_TX(3,3);
                __syncthreads();
            }
        }
    }
    // Epilogue: write the accumulators out, either blended with the
    // existing targets (scale) or overwriting them.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
* Other batch sizes will work, but but I made no attempt whatsoever
* to make them work fast.
*/
void _imgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
megdnn_assert_internal(numImgColors % numGroups == 0);
//megdnn_assert_internal(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
bool previous_limit = (numFilters % (16 * numGroups)) == 0;
megdnn_assert_internal(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
megdnn_assert_internal(numGroups == 1 || numFilterColors % 4 == 0);
megdnn_assert_internal(filterPixels == filterSize * filterSize);
megdnn_assert_internal(hidActs.getNumRows() == numModules * numFilters);
megdnn_assert_internal(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
megdnn_assert_internal(numModules == numModulesY * numModulesX);
megdnn_assert_internal(hidActs.isContiguous());
megdnn_assert_internal(filters.isContiguous());
megdnn_assert_internal(!hidActs.isTrans());
megdnn_assert_internal(!filters.isTrans());
megdnn_assert_internal(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
megdnn_assert_internal(paddingStart <= 0);
megdnn_assert_internal(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
megdnn_assert_internal(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
megdnn_assert_internal(moduleStride <= filterSize);
megdnn_assert_internal(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread = 0, imgsPerThread = 0;
if (numFilterColors % 8 == 0) {
threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 12
: numFilterColors % 32 == 0 ? 8
: numFilterColors % 16 == 0 ? 4
: 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
megdnn_assert_internal(numFilterColors % (threads.y * colorsPerThread) == 0);
//previous_limit = numFilterColors % (threads.y * colorsPerThread) == 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
// NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
} else if (numFilterColors > 3) {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
megdnn_assert_internal(targets.getNumRows() == numImgColors * imgPixels);
megdnn_assert_internal(targets.getNumCols() == numImages);
}
const bool scale = scaleTargets != 0;
// hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared);
// conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(
// hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize,
// imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
//return;
// printf("conv: %d\n", conv);
// printf("scale: %d\n", scale);
// printf("checkCaseBounds: %d\n", checkCaseBounds);
// printf("numFilterColors: %d\n", numFilterColors);
// printf("numImages: %d\n", numImages);
// hipStream_t stream = NVMatrix::getDefaultStream();
if (conv == false) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
if (previous_limit) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
if (previous_limit) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
/*
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
*/
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
getLastCudaError("imgActs: kernel execution failed");
}
/*
 * Backward-data pass with convolutional filters (one filter bank shared by
 * all modules).  Overwrites `targets` with the computed image gradients,
 * i.e. runs _imgActs with scaleTargets = 0, scaleOutput = 1, conv = true.
 */
void convImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
    int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    const float discardTargets = 0;  // existing contents of `targets` are ignored
    const float unitOutput = 1;      // take the computed gradients at unit scale
    const bool sharedFilters = true; // conv == true: filters are shared across modules
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             discardTargets, unitOutput, sharedFilters);
}
/*
 * Backward-data pass with convolutional filters (one filter bank shared by
 * all modules), accumulating into `targets`:
 *   targets = scaleTargets * targets + scaleOutput * imgActs
 */
void convImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
    int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
    float scaleTargets, float scaleOutput) {
    const bool sharedFilters = true; // conv == true: filters are shared across modules
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, sharedFilters);
}
/*
 * Backward-data pass with locally-connected (per-module) filters.
 * Overwrites `targets` with the computed image gradients, i.e. runs
 * _imgActs with scaleTargets = 0, scaleOutput = 1, conv = false.
 */
void localImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
    int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    const float discardTargets = 0;    // existing contents of `targets` are ignored
    const float unitOutput = 1;        // take the computed gradients at unit scale
    const bool sharedFilters = false;  // conv == false: each module has its own filters
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             discardTargets, unitOutput, sharedFilters);
}
/*
 * Backward-data pass with locally-connected (per-module) filters,
 * accumulating into `targets`:
 *   targets = scaleTargets * targets + scaleOutput * imgActs
 */
void localImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
    int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
    float scaleTargets, float scaleOutput) {
    const bool sharedFilters = false; // conv == false: each module has its own filters
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, sharedFilters);
}
} // namespace cuda
} // namespace megdnn
| bf773f4b816d398601b1354c49c4eb798369f3aa.cu | /**
* \file dnn/src/cuda/local/cuda-convnet2/img_acts.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* * This file has been modified by Megvii ("Megvii Modifications").
* * All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
* --------------------------------------------------------------------------
*/
#include "cudaconv2.cuh"
#include "nvmatrix.cuh"
#include "img_acts/img_act_templates.cuh"
// Stub out the C99 _Pragma operator on Windows builds (presumably because the
// MSVC toolchain does not accept it -- TODO confirm).  The effect is that the
// _Pragma("unroll") hints inside the IA_PRELOAD_* macros below expand to
// nothing there; the code still compiles, only the unrolling hint is lost.
#ifdef _WIN32
#define _Pragma(x)
#endif
namespace megdnn {
namespace cuda {
/*
* New Titan-optimized stuff.
*/
/*
 * Maps a module coordinate (my, mx) plus this block's output pixel
 * (blockPixelIdxY, blockPixelIdxX) to:
 *   moduleIdx     - the flattened (row-major) index of the module, and
 *   pxIdxInFilter - the pixel's offset inside that module's filter window.
 * Pure arithmetic helper for the preloading kernels below.
 */
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX,
        const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) {
    moduleIdx = my * numModulesX + mx; // flattened module index (out)
    // Position of the output pixel relative to this module's filter window,
    // whose top-left corner sits at (paddingStart + m* x moduleStride).
    const int filterRow = blockPixelIdxY - (paddingStart + my * moduleStride);
    const int filterCol = blockPixelIdxX - (paddingStart + mx * moduleStride);
    pxIdxInFilter = filterRow * filterSize + filterCol; // offset within filter (out)
}
// Helper macros for the software-pipelined kernels below.  They expand inside
// those kernels and rely on their local names (prod, shFilters, shHidActs,
// wPreload, hPreload, filtersLoadOffset, hidActsLoadOffset, fLoad, hLoad) and
// template constants (B_X, B_Y, imgsPerThread, colorsPerThread, filterCacheF,
// filterCacheH):
//   IA_PRELOAD_LOOP / IA_PRELOAD_LOOP2  - one shared-memory FMA slice of the
//       colorsPerThread x imgsPerThread accumulator, with the two loops
//       nested in either order (LOOP2 is the inverted nesting).
//   IA_PRELOAD_LOOP3                    - same accumulation, iterating the
//       whole filter cache for a single image index i.
//   IA_PRELOAD_W / IA_PRELOAD_W_TX      - prefetch the next filter weight
//       into registers, from a raw pointer or via tex1Dfetch.
//   IA_PRELOAD_H / IA_PRELOAD_H_TX      - prefetch the next hidden-activation
//       value into registers, with the usual image-bounds guard.
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \
/*
 * Same loop as above but inverted.
 */
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
    _Pragma("unroll") \
    for (int i = 0; i < imgsPerThread; i++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \
#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}
/*
 * Backward-data ("image acts") kernel for the many-color case, reading
 * hidActs and filters through texture objects and software-pipelining the
 * loads: the next filter/hidAct tiles are prefetched into registers
 * (wPreload / hPreload) while the current tiles are consumed from shared
 * memory.  The hand-unrolled z-loops below hard-code filterCacheF == 32 and
 * filterCacheH == 16 (two half-tiles per iteration), as the name suggests.
 *
 * Layout (from the index arithmetic below):
 *   hidActs: (numFilters, numModules, numImages)
 *   filters: (numFilterColors, filterPixels, numFilters) if conv,
 *            (numModules, numFilterColors, filterPixels, numFilters) otherwise
 *   targets: (numImgColors, imgPixels, numImages)
 * Each thread accumulates colorsPerThread x imgsPerThread outputs;
 * blockIdx.x packs the image block (fastest) and the color block,
 * blockIdx.y selects the output pixel.  Shared memory is the two static
 * tiles shFilters/shHidActs sized by the template parameters.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2)   // 256 threads per block, 2 blocks per multiprocessor
// These launch bounds ensure 25% occupancy (128 registers used)
// as opposed to 13% (130 registers) achieved by defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
    const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
    const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
    const int numImgColors, const int numGroups,
    const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
    // Clear the tiles up front: in boundary cases some elements below are
    // conditionally skipped and would otherwise be read uninitialized.
    fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
    fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
    __syncthreads();
    // Decode blockIdx.x into (image block, color block); blockIdx.y is the pixel.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;
    // Base offsets into the texture objects for this thread's loads.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
    // Per-thread accumulator: colorsPerThread x imgsPerThread partial outputs.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this output pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;
    /*
     * Initial preload
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]
    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
            blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
    // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
    //                           : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    // Prime the weight registers for the first tile.
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }
    // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
    int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
    // Prime the hidden-activation registers for the first tile.
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }
    // Walk every module covering this pixel; for each, prefetch the NEXT
    // module's coordinates so loads for it can overlap current computation.
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;
        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;
            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                    blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Commit the preloaded weights to shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }
                // Point the weight prefetch at the next tile (or the next
                // module's first tile when this is the last f iteration).
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }
                // Commit the preloaded hidActs (first filterCacheH half) to shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                __syncthreads();
                // First half-tile: consume shared memory while prefetching
                // the next weights (z<4) and the second hidAct half (4<=z<12).
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_W_TX(z);
                }
                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_H_TX((z-4)/4,z%4);
                }
                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                }
                __syncthreads();
                // Commit the second hidAct half-tile to shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                __syncthreads();
                // Second half-tile, prefetching the next iteration's hidActs.
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_W_TX(z+4);
                }
                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_H_TX((z-4)/4, z%4);
                }
                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                }
                __syncthreads();
            }
        }
    }
    // Write-back: either blend into the existing targets or overwrite them.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
 * Backward-data ("image acts") kernel for the many-color case, tuned for
 * ty=4, tx=32, colorsPerThread=12, filterCacheF == filterCacheH == 16 (the
 * 16 hand-written IA_PRELOAD_LOOP2 invocations below hard-code the cache
 * size).  Like the ty_8 variant above it reads hidActs/filters through
 * texture objects and preloads the next tile into registers
 * (wPreload/hPreload) while the current one is consumed from shared memory,
 * but it uses a single-stage pipeline: one shared-memory tile per f step.
 *
 * Layout (from the index arithmetic below):
 *   hidActs: (numFilters, numModules, numImages)
 *   filters: (numFilterColors, filterPixels, numFilters) if conv,
 *            (numModules, numFilterColors, filterPixels, numFilters) otherwise
 *   targets: (numImgColors, imgPixels, numImages)
 * Each thread accumulates colorsPerThread x imgsPerThread outputs;
 * blockIdx.x packs the image block (fastest) and the color block,
 * blockIdx.y selects the output pixel.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3)   // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
    const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
    const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
    const int numImgColors, const int numGroups,
    const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
    // Clear the tiles up front: in boundary cases some elements below are
    // conditionally skipped and would otherwise be read uninitialized.
    fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
    fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
    __syncthreads();
    // Decode blockIdx.x into (image block, color block); blockIdx.y is the pixel.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;
    // Base offsets into the texture objects for this thread's loads.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
    // Per-thread accumulator: colorsPerThread x imgsPerThread partial outputs.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this output pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;
    /*
     * Initial preload
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]
    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
            blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
    // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
    //                           : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    // Prime the weight registers for the first tile.
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }
    // const float* hLoad = &hidActs[moduleIdx * numImages];
    int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
    // Prime the hidden-activation registers for the first tile.
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }
    // Walk every module covering this pixel; for each, prefetch the NEXT
    // module's coordinates so loads for it can overlap current computation.
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;
        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;
            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                    blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Commit the preloaded weights to shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }
                // Point the weight prefetch at the next tile (or the next
                // module's first tile when this is the last f iteration).
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }
                // Commit the preloaded hidActs to shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }
                __syncthreads();
                // It seems that there is no point explicitly interleaving loads
                // and computations because the scheduler does that anyway.
                IA_PRELOAD_LOOP2(0,0);
                IA_PRELOAD_LOOP2(1,0);
                IA_PRELOAD_LOOP2(2,0);
                IA_PRELOAD_LOOP2(3,0);
                IA_PRELOAD_LOOP2(4,0);
                IA_PRELOAD_LOOP2(5,0);
                IA_PRELOAD_LOOP2(6,0);
                IA_PRELOAD_LOOP2(7,0);
                IA_PRELOAD_LOOP2(8,0);
                IA_PRELOAD_LOOP2(9,0);
                IA_PRELOAD_LOOP2(10,0);
                IA_PRELOAD_LOOP2(11,0);
                IA_PRELOAD_LOOP2(12,0);
                IA_PRELOAD_LOOP2(13,0);
                IA_PRELOAD_LOOP2(14,0);
                IA_PRELOAD_LOOP2(15,0);
                // Prefetch next iteration's weights and hidActs into registers.
                IA_PRELOAD_W_TX(0);
                IA_PRELOAD_W_TX(1);
                IA_PRELOAD_W_TX(2);
                IA_PRELOAD_W_TX(3);
                IA_PRELOAD_W_TX(4);
                IA_PRELOAD_W_TX(5);
                IA_PRELOAD_H_TX(0,0);
                IA_PRELOAD_H_TX(0,1);
                IA_PRELOAD_H_TX(0,2);
                IA_PRELOAD_H_TX(0,3);
                IA_PRELOAD_H_TX(1,0);
                IA_PRELOAD_H_TX(1,1);
                IA_PRELOAD_H_TX(1,2);
                IA_PRELOAD_H_TX(1,3);
                IA_PRELOAD_H_TX(2,0);
                IA_PRELOAD_H_TX(2,1);
                IA_PRELOAD_H_TX(2,2);
                IA_PRELOAD_H_TX(2,3);
                IA_PRELOAD_H_TX(3,0);
                IA_PRELOAD_H_TX(3,1);
                IA_PRELOAD_H_TX(3,2);
                IA_PRELOAD_H_TX(3,3);
                __syncthreads();
            }
        }
    }
    // Write-back: either blend into the existing targets or overwrite them.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _imgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
megdnn_assert_internal(numImgColors % numGroups == 0);
//megdnn_assert_internal(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
bool previous_limit = (numFilters % (16 * numGroups)) == 0;
megdnn_assert_internal(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
megdnn_assert_internal(numGroups == 1 || numFilterColors % 4 == 0);
megdnn_assert_internal(filterPixels == filterSize * filterSize);
megdnn_assert_internal(hidActs.getNumRows() == numModules * numFilters);
megdnn_assert_internal(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
megdnn_assert_internal(numModules == numModulesY * numModulesX);
megdnn_assert_internal(hidActs.isContiguous());
megdnn_assert_internal(filters.isContiguous());
megdnn_assert_internal(!hidActs.isTrans());
megdnn_assert_internal(!filters.isTrans());
megdnn_assert_internal(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
megdnn_assert_internal(paddingStart <= 0);
megdnn_assert_internal(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
megdnn_assert_internal(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
megdnn_assert_internal(moduleStride <= filterSize);
megdnn_assert_internal(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread = 0, imgsPerThread = 0;
if (numFilterColors % 8 == 0) {
threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 12
: numFilterColors % 32 == 0 ? 8
: numFilterColors % 16 == 0 ? 4
: 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
megdnn_assert_internal(numFilterColors % (threads.y * colorsPerThread) == 0);
//previous_limit = numFilterColors % (threads.y * colorsPerThread) == 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
// NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
} else if (numFilterColors > 3) {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
megdnn_assert_internal(targets.getNumRows() == numImgColors * imgPixels);
megdnn_assert_internal(targets.getNumCols() == numImages);
}
const bool scale = scaleTargets != 0;
// cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
// conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(
// hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize,
// imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
//return;
// printf("conv: %d\n", conv);
// printf("scale: %d\n", scale);
// printf("checkCaseBounds: %d\n", checkCaseBounds);
// printf("numFilterColors: %d\n", numFilterColors);
// printf("numImages: %d\n", numImages);
// cudaStream_t stream = NVMatrix::getDefaultStream();
if (conv == false) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
if (previous_limit) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
if (previous_limit) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 8, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 4, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
/*
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
*/
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
getLastCudaError("imgActs: kernel execution failed");
}
// Convolutional backward-data ("image activations") entry point.
// Overwrites `targets` with the result: calls _imgActs with scaleTargets = 0
// (per _imgActs, "do not scale or use targets matrix" — targets is resized and
// overwritten) and scaleOutput = 1. The trailing `true` presumably maps to
// _imgActs's `conv` flag selecting the weight-shared (convolutional) dispatch
// path — TODO confirm against _imgActs's parameter list.
void convImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
// Convolutional backward-data entry point, accumulating variant.
// Forwards the caller-provided scaleTargets/scaleOutput factors to _imgActs
// (a non-zero scaleTargets makes _imgActs keep the existing `targets` contents
// rather than resizing it — see the scaleTargets == 0 branch in _imgActs).
// The trailing `true` presumably selects the convolutional dispatch path —
// TODO confirm against _imgActs's parameter list.
void convImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                 float scaleTargets, float scaleOutput) {
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
// Locally-connected backward-data entry point.
// Same as convImgActs but passes `false` as the trailing flag, which
// presumably routes _imgActs to its unshared-weights (local) kernels —
// TODO confirm against _imgActs's `conv` parameter. Overwrites `targets`
// (scaleTargets = 0, scaleOutput = 1).
void localImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
// Locally-connected backward-data entry point, accumulating variant.
// Forwards the caller-provided scaleTargets/scaleOutput to _imgActs and passes
// `false` as the trailing flag (presumably the unshared-weights path — TODO
// confirm against _imgActs's parameter list).
void localImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                  float scaleTargets, float scaleOutput) {
    _imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
} // namespace cuda
} // namespace megdnn
|
e6f6cb377ed4ee737793351869f0ff2891413be2.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/hip/HIPBlas.h>
namespace at { namespace native {
// Batched beta*self + alpha*(batch1 @ batch2): self is broadcast to the
// (b, n, p) result shape, then the legacy TH kernel does the work.
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  const int64_t b = batch1.size(0);
  const int64_t n = batch1.size(1);
  const int64_t p = batch2.size(2);
  Tensor expanded_self;
  std::tie(expanded_self) = expand_size(self, {b, n, p}, "baddbmm");
  return legacy::cuda::_th_baddbmm(expanded_self, batch1, batch2, beta, alpha);
}
// Batched beta*self + alpha*(batch1 @ batch2) written into `result`;
// self is broadcast to the (b, n, p) output shape first.
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  const int64_t b = batch1.size(0);
  const int64_t n = batch1.size(1);
  const int64_t p = batch2.size(2);
  Tensor expanded_self;
  std::tie(expanded_self) = expand_size(self, {b, n, p}, "baddbmm_out");
  return legacy::cuda::_th_baddbmm_out(result, expanded_self, batch1, batch2, beta, alpha);
}
// In-place baddbmm: self doubles as the output tensor.
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
// Batched matrix multiply into a caller-provided tensor: resize `result`
// to (b, n, p) and delegate to the legacy TH kernel.
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
  const int64_t b = batch1.size(0);
  const int64_t n = batch1.size(1);
  const int64_t p = batch2.size(2);
  result.resize_({b, n, p});
  return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
// Out-of-place batched matmul: allocate an empty tensor (bmm_out_cuda
// resizes it to the result shape) and delegate.
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor out = at::empty({0}, self.options());
  return native::bmm_out_cuda(out, self, mat2);
}
// Return a tensor whose layout cuBLAS can consume directly, reporting via
// transpose_tensor whether it must be passed as transposed:
//  - column-major-compatible layout -> pass through, no transpose;
//  - row-major-compatible layout    -> pass through, flagged transposed;
//  - anything else                  -> contiguous (row-major) clone, transposed.
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
  const IntArrayRef strides = tensor.strides();
  const IntArrayRef sizes = tensor.sizes();
  if ((strides[0] == 1) && (strides[1] >= std::max<int64_t>(1, sizes[0]))) {
    transpose_tensor = false;
    return tensor;
  }
  if ((strides[1] == 1) && (strides[0] >= std::max<int64_t>(1, sizes[1]))) {
    transpose_tensor = true;
    return tensor;
  }
  transpose_tensor = true;
  return tensor.clone(at::MemoryFormat::Contiguous);
}
namespace {
// Core CUDA addmm: result = beta * self + alpha * (mat1 @ mat2) via a single
// cuBLAS GEMM. cuBLAS is column-major, so when `result` is effectively
// row-major the transposed product is computed instead (operands swapped and
// transpose flags flipped below) so the GEMM can still write in place.
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
  TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
  checkAllSameGPU("addmm", args);
  // Broadcast self to the (m, n) output shape unless it aliases result.
  Tensor self_;
  if (&result != &self) {
    std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
  } else {
    self_ = self;
  }
  IntArrayRef mat1_sizes = mat1.sizes();
  IntArrayRef mat2_sizes = mat2.sizes();
  IntArrayRef self__sizes = self_.sizes();
  TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
  TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
  TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
  if (&result != &self) {
    at::native::resize_as_(result, self_);
    // Seed result with self only when GEMM's beta-accumulate will read it.
    if (beta.to<double>() != 0.0) {
      at::native::copy_(result, self_);
    }
  }
  TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
  IntArrayRef result_sizes = result.sizes();
  // Empty output: nothing to compute.
  if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
    return result;
  }
  bool transpose_result;
  Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
  bool transpose_mat1;
  bool transpose_mat2;
  // If result must be treated as transposed, compute (mat2^T @ mat1^T)
  // instead: swap the operands here and flip their transpose flags below.
  Tensor mat1_ = transpose_result ? mat2 : mat1;
  Tensor mat2_ = transpose_result ? mat1 : mat2;
  mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
  mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
  if (transpose_result) {
    transpose_mat1 = !transpose_mat1;
    transpose_mat2 = !transpose_mat2;
    mat1_sizes = mat1_.sizes();
    mat2_sizes = mat2_.sizes();
  }
  // GEMM problem sizes and leading dimensions, in cuBLAS terms.
  int64_t m = mat1_sizes[transpose_result ? 1 : 0];
  int64_t k = mat1_sizes[transpose_result ? 0 : 1];
  int64_t n = mat2_sizes[transpose_result ? 0 : 1];
  int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
  int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
  int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
  at::ScalarType scalar_type = self_.scalar_type();
  // Degenerate product (empty mat1, i.e. k == 0 after the empty-result
  // early-out above): the matmul term is zero, so the answer is beta * self.
  if (mat1.numel() == 0) {
    return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
  }
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
    scalar_t alpha_val = alpha.to<scalar_t>();
    scalar_t beta_val = beta.to<scalar_t>();
    scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
    scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
    scalar_t* result_ptr = result_.data_ptr<scalar_t>();
    at::cuda::blas::gemm<scalar_t>(
      transpose_mat1 ? 't' : 'n',
      transpose_mat2 ? 't' : 'n',
      m, n, k,
      alpha_val,
      mat1_ptr, mat1_ld,
      mat2_ptr, mat2_ld,
      beta_val,
      result_ptr, result_ld
    );
  });
  // If prepare_matrix_for_cublas had to clone, copy the GEMM output back
  // into the caller's tensor.
  if (result.data_ptr() != result_.data_ptr()) {
    result.copy_(result_);
  }
  return result;
}
} // anonymous namespace
// Plain matmul into `result`: mm is addmm with beta = 0, alpha = 1 and the
// output tensor doubling as the (ignored) additive input.
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
  const int64_t rows = self.size(0);
  const int64_t cols = mat2.size(1);
  result.resize_({rows, cols});
  return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
// Out-of-place matmul: allocate the (rows, cols) output and run addmm with
// beta = 0, alpha = 1, the output serving as the (ignored) additive input.
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor out = at::empty({ self.size(0), mat2.size(1) }, self.options());
  return addmm_out_cuda_impl(out, out, self, mat2, 0, 1);
}
// out = beta * self + alpha * (mat1 @ mat2), with named-tensor handling:
// the numeric work runs with names suppressed, then output names are
// propagated per the addmm rules.
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
                       const Tensor &mat1, const Tensor &mat2,
                       Scalar beta, Scalar alpha) {
  {
    at::NoNamesGuard guard;
    // The impl writes into `out`; the previously unused reference binding
    // of its return value has been removed.
    addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
  }
  at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
  return out;
}
// Out-of-place addmm: allocate a fresh output and delegate to the _out form.
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
                  Scalar beta, Scalar alpha) {
  Tensor result = at::empty({0}, self.options());
  addmm_out_cuda(result, self, mat1, mat2, beta, alpha);
  return result;
}
// In-place addmm: self is both the additive input and the destination.
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
                    Scalar beta, Scalar alpha) {
  return addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
}
// out = beta * self + alpha * vec1 (outer) vec2, via cuBLAS GER
// (float/double only). GER needs the output matrix to have unit stride in
// one dimension; the three branches pick the call orientation that
// satisfies that, falling back to a contiguous clone otherwise.
template<typename scalar_t>
void addr_impl_ger_cuda(Tensor &out, const Tensor &self,
                        const Tensor& vec1, const Tensor& vec2,
                        scalar_t alpha, scalar_t beta) {
  static_assert(std::is_same<scalar_t, float>::value ||
                std::is_same<scalar_t, double>::value,
                "addr_impl_ger_cuda: only float and double are supported");
  if (&out != &self) {
    at::native::resize_as_(out, self);
    at::native::copy_(out, self);
  }
  if (beta == 0.0) {
    at::native::zero_(out);
  } else if (beta != 1.0) {
    // else-if: when beta == 0 the tensor was just zeroed, so the extra
    // multiply-by-zero kernel the original launched is redundant.
    at::native::mul_(out, beta);
  }
  if (out.stride(0) == 1) {
    // Column-major-friendly layout: A += alpha * vec1 * vec2^T directly.
    at::cuda::blas::ger<scalar_t>(
      vec1.size(0), vec2.size(0), alpha,
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      out.data_ptr<scalar_t>(), out.stride(1)
    );
  } else if (out.stride(1) == 1) {
    // Row-major layout: compute the transposed update
    // A^T += alpha * vec2 * vec1^T.
    at::cuda::blas::ger<scalar_t>(
      vec2.size(0), vec1.size(0), alpha,
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      out.data_ptr<scalar_t>(), out.stride(0)
    );
  } else {
    // Neither dimension has unit stride: run GER on a contiguous
    // (row-major) clone and adopt its storage.
    // BUG FIX: the previous code passed `out` (whose layout GER cannot
    // address) to the GER call and then discarded that result with
    // out.set_(cr); the update must be written into the clone instead.
    Tensor cr = out.clone();
    at::cuda::blas::ger<scalar_t>(
      vec2.size(0), vec1.size(0), alpha,
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      cr.data_ptr<scalar_t>(), cr.stride(0)
    );
    out.set_(cr);
  }
}
// Generic addr fallback: express the rank-1 update as an (n x 1) @ (1 x m)
// matrix product so addmm can serve dtypes without a cuBLAS ?ger kernel
// (currently no Hger/SgerEx in Cublas).
template<typename scalar_t>
void addr_impl_cuda(Tensor &out, const Tensor &self,
                    const Tensor& vec1, const Tensor& vec2,
                    scalar_t alpha, scalar_t beta) {
  Tensor col = vec1.reshape({vec1.size(0), 1});
  Tensor row = vec2.reshape({1, vec2.size(0)});
  addmm_out_cuda(out, self, col, row, beta, alpha);
}
// float specialisation: route through the direct GER path instead of the
// addmm fallback.
template<>
void addr_impl_cuda<float>(Tensor &out, const Tensor &self,
                           const Tensor& vec1, const Tensor& vec2,
                           float alpha, float beta) {
  addr_impl_ger_cuda<float>(out, self, vec1, vec2, alpha, beta);
}
// double specialisation: route through the direct GER path instead of the
// addmm fallback.
template<>
void addr_impl_cuda<double>(Tensor &out, const Tensor &self,
                            const Tensor& vec1, const Tensor& vec2,
                            double alpha, double beta) {
  addr_impl_ger_cuda<double>(out, self, vec1, vec2, alpha, beta);
}
// Validating entry point for addr:
//   out = beta * self + alpha * vec1 (outer) vec2.
// Checks vector ranks, device placement and shapes, broadcasts self to
// (len(vec1), len(vec2)), then dispatches on dtype to addr_impl_cuda.
Tensor& addr_out_cuda(Tensor &out, const Tensor& self,
                      const Tensor& vec1, const Tensor& vec2,
                      Scalar beta, Scalar alpha) {
  TORCH_CHECK(vec1.dim() == 1 && vec2.dim() == 1,
              "vec1 and vec2 should be 1-dimensional vectors. Got dimensions ",
              vec1.dim(), " and ", vec2.dim());
  // Broadcast self to the outer-product shape unless it aliases the output.
  Tensor self_;
  if (&out != &self) {
    std::tie(self_) = expand_size(self, {vec1.size(0), vec2.size(0)}, "addr");
  } else {
    self_ = self;
  }
  TORCH_CHECK(out.device() == self_.device() &&
              out.device() == vec1.device() &&
              out.device() == vec2.device(),
              "Expected all tensors to be on the same device. Found: ",
              out.device(), ", ", self_.device(), ", ",
              vec1.device(), " and ", vec2.device());
  TORCH_CHECK(self_.dim() == 2,
              "2D tensor expected, got ", self_.dim(), "D tensor for input");
  TORCH_CHECK(self_.size(0) == vec1.size(0) && self_.size(1) == vec2.size(0),
              "size mismatch",
              ", input: ", self_.sizes(),
              ", v1: ", vec1.sizes(),
              ", v2: ", vec2.sizes());
  // float/double go through the GER specialisations; everything else uses
  // the generic addmm-based template.
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self_.scalar_type(), "addr_out_cuda", [&] {
    addr_impl_cuda<scalar_t>(out, self_, vec1, vec2,
                             alpha.to<scalar_t>(), beta.to<scalar_t>());
  });
  return out;
}
// In-place addr: self serves as both the additive input and the output.
Tensor& addr__cuda(Tensor& self,
                   const Tensor& vec1, const Tensor& vec2,
                   Scalar beta, Scalar alpha) {
  return addr_out_cuda(self, self, vec1, vec2, beta, alpha);
}
// Out-of-place addr: allocate a fresh output and delegate to the _out form.
Tensor addr_cuda(const Tensor& self,
                 const Tensor& vec1, const Tensor& vec2,
                 Scalar beta, Scalar alpha) {
  Tensor result = at::empty({0}, self.options());
  addr_out_cuda(result, self, vec1, vec2, beta, alpha);
  return result;
}
// out = beta * self + alpha * sum_i (batch1[i] @ batch2[i]) — a batched
// matmul reduced over the batch dimension. Implemented as a loop of addmm
// calls; beta is forced to 1 after the first iteration so later batches
// accumulate onto `out` instead of rescaling it.
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
                        const Tensor& batch1, const Tensor& batch2,
                        Scalar beta, Scalar alpha) {
  TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
              "Batch tensors should be 3D, got dimensions ", batch1.dim(),
              " and ", batch2.dim());
  // Broadcast self to the 2-D result shape unless it aliases the output.
  Tensor self_;
  if (&out != &self) {
    std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
  } else {
    self_ = self;
  }
  TORCH_CHECK(out.device() == self_.device() &&
              out.device() == batch1.device() &&
              out.device() == batch2.device(),
              "Expected all tensors to be on the same device. Found: ",
              out.device(), ", ", self_.device(), ", ",
              batch1.device(), " and ", batch2.device());
  TORCH_CHECK(self_.dim() == 2,
              "2D tensor expected, got ", self_.dim(), "D tensor for input");
  int64_t batchnum = batch1.size(0);
  int64_t m1d1 = batch1.size(1);
  int64_t innerdim = batch1.size(2);
  int64_t m2d2 = batch2.size(2);
  TORCH_CHECK(batchnum == batch2.size(0),
              "equal number of batches expected");
  TORCH_CHECK(m1d1 == self_.size(0),
              "first dimension of batch1 must match first dimension of input");
  TORCH_CHECK(m2d2 == self_.size(1),
              "second dimension of batch2 must match second dimension of input");
  TORCH_CHECK(innerdim == batch2.size(1),
              "second dimension of batch1 must match first dimension of batch2");
  if (&out != &self) {
    at::native::resize_as_(out, self_);
    // Seed the output with self only when beta will actually read it.
    if (beta.to<double>() != 0.0) {
      at::native::copy_(out, self_);
    }
  }
  for (int64_t i=0; i<batchnum; i++) {
    addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
    // Subsequent iterations must accumulate onto out, not rescale it.
    beta = 1;
  }
  return out;
}
// In-place addbmm: self is both the additive input and the destination.
Tensor& addbmm__cuda(Tensor& self,
                     const Tensor& batch1, const Tensor& batch2,
                     Scalar beta, Scalar alpha) {
  return addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
// Out-of-place addbmm: allocate a fresh output and delegate to the _out form.
Tensor addbmm_cuda(const Tensor& self,
                   const Tensor& batch1, const Tensor& batch2,
                   Scalar beta, Scalar alpha)
{
  Tensor result = at::empty({0}, self.options());
  addbmm_out_cuda(result, self, batch1, batch2, beta, alpha);
  return result;
}
// Dot product of two 1-D tensors via the blas::dot wrapper.
//
// Sizes/strides are narrowed to int for BLAS, so inputs are validated
// against INT_MAX first. The result stays on the device (pointer mode
// DEVICE) in a 0-dim tensor; no host synchronisation happens here.
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
  at::NoNamesGuard guard;
  TORCH_CHECK(
      self.dim() == 1 && other.dim() == 1,
      "1D tensors expected, but got ",
      self.dim(),
      "D and ",
      other.dim(),
      "D tensors");
  TORCH_CHECK(
      self.scalar_type() == other.scalar_type(),
      "dot : expected both vectors to have same dtype, but found ",
      self.scalar_type(),
      " and ",
      other.scalar_type());
  TORCH_CHECK(
      self.numel() == other.numel(),
      "inconsistent tensor size, expected tensor [",
      self.numel(),
      "] and src [",
      other.numel(),
      "] to have the same number of elements, but got ",
      self.numel(),
      " and ",
      other.numel(),
      " elements respectively");
  TORCH_CHECK(
      self.device() == other.device(),
      "expected all tensors to be on the same device. Found: ",
      self.device(),
      ", ",
      other.device());
  // BUG FIX: TORCH_CHECK stream-concatenates its trailing arguments rather
  // than printf-formatting the message, so the old "%d" placeholder was
  // printed literally; stream INT_MAX after the prose instead.
  TORCH_CHECK(
      (self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
          (other.stride(0) <= INT_MAX),
      "dot only supports n, incx, incy with the bound [val] <= ",
      INT_MAX);
  const int n = static_cast<int>(self.numel());
  int incx = static_cast<int>(self.stride(0));
  int incy = static_cast<int>(other.stride(0));
  if (n == 1) {
    // A single-element vector may carry an arbitrary (even 0) stride; BLAS
    // requires a usable increment, and for n == 1 its value is irrelevant.
    incx = 1;
    incy = 1;
  }
  return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
    Tensor result = at::empty({}, self.options());
    auto handle = at::cuda::getCurrentCUDABlasHandle();
    // Keep the reduction result on the device: blas::dot writes straight
    // into result's storage instead of returning a host scalar.
    at::cuda::blas::PointerModeGuard pointerModeGuard(handle, HIPBLAS_POINTER_MODE_DEVICE);
    at::cuda::blas::dot<scalar_t>(
        handle,
        n,
        self.data_ptr<scalar_t>(),
        incx,
        other.data_ptr<scalar_t>(),
        incy,
        result.data_ptr<scalar_t>());
    return result;
  });
}
} }
| e6f6cb377ed4ee737793351869f0ff2891413be2.cu | #include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/cuda/CUDABlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
IntArrayRef tensor_sizes = tensor.sizes();
if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
namespace {
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
checkAllSameGPU("addmm", args);
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
if (mat1.numel() == 0) {
return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
// out = beta * self + alpha * (mat1 @ mat2), with named-tensor handling:
// the numeric work runs with names suppressed, then output names are
// propagated per the addmm rules.
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
                       const Tensor &mat1, const Tensor &mat2,
                       Scalar beta, Scalar alpha) {
  {
    at::NoNamesGuard guard;
    // The impl writes into `out`; the previously unused reference binding
    // of its return value has been removed.
    addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
  }
  at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
  return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
// out = beta * self + alpha * vec1 (outer) vec2, via cuBLAS GER
// (float/double only). GER needs the output matrix to have unit stride in
// one dimension; the three branches pick the call orientation that
// satisfies that, falling back to a contiguous clone otherwise.
template<typename scalar_t>
void addr_impl_ger_cuda(Tensor &out, const Tensor &self,
                        const Tensor& vec1, const Tensor& vec2,
                        scalar_t alpha, scalar_t beta) {
  static_assert(std::is_same<scalar_t, float>::value ||
                std::is_same<scalar_t, double>::value,
                "addr_impl_ger_cuda: only float and double are supported");
  if (&out != &self) {
    at::native::resize_as_(out, self);
    at::native::copy_(out, self);
  }
  if (beta == 0.0) {
    at::native::zero_(out);
  } else if (beta != 1.0) {
    // else-if: when beta == 0 the tensor was just zeroed, so the extra
    // multiply-by-zero kernel the original launched is redundant.
    at::native::mul_(out, beta);
  }
  if (out.stride(0) == 1) {
    // Column-major-friendly layout: A += alpha * vec1 * vec2^T directly.
    at::cuda::blas::ger<scalar_t>(
      vec1.size(0), vec2.size(0), alpha,
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      out.data_ptr<scalar_t>(), out.stride(1)
    );
  } else if (out.stride(1) == 1) {
    // Row-major layout: compute the transposed update
    // A^T += alpha * vec2 * vec1^T.
    at::cuda::blas::ger<scalar_t>(
      vec2.size(0), vec1.size(0), alpha,
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      out.data_ptr<scalar_t>(), out.stride(0)
    );
  } else {
    // Neither dimension has unit stride: run GER on a contiguous
    // (row-major) clone and adopt its storage.
    // BUG FIX: the previous code passed `out` (whose layout GER cannot
    // address) to the GER call and then discarded that result with
    // out.set_(cr); the update must be written into the clone instead.
    Tensor cr = out.clone();
    at::cuda::blas::ger<scalar_t>(
      vec2.size(0), vec1.size(0), alpha,
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      cr.data_ptr<scalar_t>(), cr.stride(0)
    );
    out.set_(cr);
  }
}
template<typename scalar_t>
void addr_impl_cuda(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
scalar_t alpha, scalar_t beta) {
// currently no Hger/SgerEx in Cublas.
Tensor vec2T = vec2.reshape({1, vec2.size(0)});
Tensor vec1M = vec1.reshape({vec1.size(0), 1});
addmm_out_cuda(out, self, vec1M, vec2T, beta, alpha);
}
template<>
void addr_impl_cuda<float>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
float alpha, float beta) {
addr_impl_ger_cuda<float>(out, self, vec1, vec2, alpha, beta);
}
template<>
void addr_impl_cuda<double>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
double alpha, double beta) {
addr_impl_ger_cuda<double>(out, self, vec1, vec2, alpha, beta);
}
Tensor& addr_out_cuda(Tensor &out, const Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(vec1.dim() == 1 && vec2.dim() == 1,
"vec1 and vec2 should be 1-dimensional vectors. Got dimensions ",
vec1.dim(), " and ", vec2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {vec1.size(0), vec2.size(0)}, "addr");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == vec1.device() &&
out.device() == vec2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
vec1.device(), " and ", vec2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
TORCH_CHECK(self_.size(0) == vec1.size(0) && self_.size(1) == vec2.size(0),
"size mismatch",
", input: ", self_.sizes(),
", v1: ", vec1.sizes(),
", v2: ", vec2.sizes());
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self_.scalar_type(), "addr_out_cuda", [&] {
addr_impl_cuda<scalar_t>(out, self_, vec1, vec2,
alpha.to<scalar_t>(), beta.to<scalar_t>());
});
return out;
}
Tensor& addr__cuda(Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
addr_out_cuda(self, self, vec1, vec2, beta, alpha);
return self;
}
Tensor addr_cuda(const Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addr_out_cuda(out, self, vec1, vec2, beta, alpha);
return out;
}
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
"Batch tensors should be 3D, got dimensions ", batch1.dim(),
" and ", batch2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == batch1.device() &&
out.device() == batch2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
batch1.device(), " and ", batch2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
int64_t batchnum = batch1.size(0);
int64_t m1d1 = batch1.size(1);
int64_t innerdim = batch1.size(2);
int64_t m2d2 = batch2.size(2);
TORCH_CHECK(batchnum == batch2.size(0),
"equal number of batches expected");
TORCH_CHECK(m1d1 == self_.size(0),
"first dimension of batch1 must match first dimension of input");
TORCH_CHECK(m2d2 == self_.size(1),
"second dimension of batch2 must match second dimension of input");
TORCH_CHECK(innerdim == batch2.size(1),
"second dimension of batch1 must match first dimension of batch2");
if (&out != &self) {
at::native::resize_as_(out, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(out, self_);
}
}
for (int64_t i=0; i<batchnum; i++) {
addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
beta = 1;
}
return out;
}
Tensor& addbmm__cuda(Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
return self;
}
Tensor addbmm_cuda(const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha)
{
Tensor out = at::empty({0}, self.options());
addbmm_out_cuda(out, self, batch1, batch2, beta, alpha);
return out;
}
// Dot product of two 1-D CUDA tensors via cuBLAS ?dot.
//
// Sizes/strides are narrowed to int for BLAS, so inputs are validated
// against INT_MAX first. The result stays on the device
// (CUBLAS_POINTER_MODE_DEVICE) in a 0-dim tensor; no host sync here.
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
  at::NoNamesGuard guard;
  TORCH_CHECK(
      self.dim() == 1 && other.dim() == 1,
      "1D tensors expected, but got ",
      self.dim(),
      "D and ",
      other.dim(),
      "D tensors");
  TORCH_CHECK(
      self.scalar_type() == other.scalar_type(),
      "dot : expected both vectors to have same dtype, but found ",
      self.scalar_type(),
      " and ",
      other.scalar_type());
  TORCH_CHECK(
      self.numel() == other.numel(),
      "inconsistent tensor size, expected tensor [",
      self.numel(),
      "] and src [",
      other.numel(),
      "] to have the same number of elements, but got ",
      self.numel(),
      " and ",
      other.numel(),
      " elements respectively");
  TORCH_CHECK(
      self.device() == other.device(),
      "expected all tensors to be on the same device. Found: ",
      self.device(),
      ", ",
      other.device());
  // BUG FIX: TORCH_CHECK stream-concatenates its trailing arguments rather
  // than printf-formatting the message, so the old "%d" placeholder was
  // printed literally; stream INT_MAX after the prose instead.
  TORCH_CHECK(
      (self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
          (other.stride(0) <= INT_MAX),
      "dot only supports n, incx, incy with the bound [val] <= ",
      INT_MAX);
  const int n = static_cast<int>(self.numel());
  int incx = static_cast<int>(self.stride(0));
  int incy = static_cast<int>(other.stride(0));
  if (n == 1) {
    // A single-element vector may carry an arbitrary (even 0) stride; BLAS
    // requires a usable increment, and for n == 1 its value is irrelevant.
    incx = 1;
    incy = 1;
  }
  return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
    Tensor result = at::empty({}, self.options());
    auto handle = at::cuda::getCurrentCUDABlasHandle();
    // Keep the reduction result on the device: blas::dot writes straight
    // into result's storage instead of returning a host scalar.
    at::cuda::blas::PointerModeGuard pointerModeGuard(handle, CUBLAS_POINTER_MODE_DEVICE);
    at::cuda::blas::dot<scalar_t>(
        handle,
        n,
        self.data_ptr<scalar_t>(),
        incx,
        other.data_ptr<scalar_t>(),
        incy,
        result.data_ptr<scalar_t>());
    return result;
  });
}
} }
|
2a9e00b197b95cd2ab0c40dd51a90de14a1055ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "Solver_gpu.h"
#include <math.h>
#include <cmath>
#include "Solver.h"
#include "vector_var.h"
#include <iostream>
#include "Solution.h"
#include <fstream>
#include "global_variables.h"
#include "residuals.h"
#include <cstdio>
#include <ctime>
#include "artificial_dissipation.h"
#include <boost/math/special_functions/sign.hpp>
#include <limits>
#include "RungeKutta.h"
#include "tecplot_output.h"
#include "gradients.h"
#include <string>
#include <sstream>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Utilities and system includes
#include <helper_cuda.h> // helper function CUDA error checking and initialization
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <hip/hip_runtime_api.h>
#include "lagrangian_object.h"
#include "common_kernels.hpp"
#include "LBFS.hpp"
#include "immersed_boundary_method.hpp"
#define IMMERSED_BOUNDARY_METHOD
using namespace std;
// Default constructor: no work is performed here; all solver buffers are
// allocated inside the solve routines.
gpu_solver::gpu_solver()
{
    //ctor - intentionally empty
}
// Destructor: no work is performed here; resources are released where they
// are allocated.
gpu_solver::~gpu_solver()
{
    //dtor - intentionally empty
}
// Zero every accumulator used during a cell-interface evaluation: the
// interface density, the interface momentum vector and both directional
// flux contributions (this is the "reset function" noted in the original).
void gpu_solver::cell_interface_initialiser(double &rho_interface, vector_var &rho_u_interface,
    flux_var &x_flux, flux_var &y_flux) {

    auto reset_flux = [](flux_var &flux) {
        flux.P = 0;
        flux.momentum_x = 0;
        flux.momentum_y = 0;
        flux.momentum_z = 0;
    };

    rho_interface = 0;

    rho_u_interface.x = 0;
    rho_u_interface.y = 0;
    rho_u_interface.z = 0;

    reset_flux(x_flux);
    reset_flux(y_flux);
}
// Second-order equilibrium distribution, incompressible form (name suggests
// the He-Luo incompressible LBM variant):
//   feq = w * rho_0 * (3(e.u) + 4.5[(e.u)^2 - (|u| cs)^2]) + w * rho.
// @param k  lattice-direction index; currently unused in the calculation.
double gpu_solver::feq_calc_incomp(double weight, vector_var e_alpha, vector_var u_lattice, double u_magnitude,
    double cs, double rho_lattice, double rho_0, int k) {
    // Hoist the dot product (the original evaluated it twice per call) and
    // replace pow(x, 2) with plain multiplication.
    const double e_dot_u = e_alpha.Dot_Product(u_lattice);
    const double u_cs = u_magnitude * cs;

    double feq = e_dot_u * 3.0;
    feq += (e_dot_u * e_dot_u - u_cs * u_cs) * 4.5;
    feq *= weight * rho_0;
    feq += weight * rho_lattice;
    return feq;
}
// Second-order equilibrium distribution, standard (compressible) form:
//   feq = w * rho * (1 + 3(e.u) + 4.5[(e.u)^2 - (|u| cs)^2]).
double gpu_solver::feq_calc(double weight, vector_var e_alpha, vector_var u_lattice, double u_magnitude,
    double cs, double rho_lattice) {
    // Hoist the dot product (the original evaluated it twice per call) and
    // replace pow(x, 2) with plain multiplication.
    const double e_dot_u = e_alpha.Dot_Product(u_lattice);
    const double u_cs = u_magnitude * cs;

    double feq = 1.0 + e_dot_u * 3.0 + (e_dot_u * e_dot_u - u_cs * u_cs) * 4.5;
    return feq * weight * rho_lattice;
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
// Accumulate, per cell, the face areas projected onto each coordinate
// direction (sum over the cell's gradient-face stencil of |n_x|*A, |n_y|*A,
// |n_z|*A, each halved). The result is used for the CFL/local time-step
// calculation.
void gpu_solver::populate_cfl_areas(Solution &cfl_areas, unstructured_mesh &Mesh) {

    double area_x, area_y, area_z;
    int face;

    for (int i = 0; i < Mesh.get_n_cells(); i++) {
        area_x = 0;
        area_y = 0;
        area_z = 0;

        // time step condition as per OpenFoam calcs
        for (int f = 0; f < Mesh.gradient_faces[i].size(); f++) {
            face = Mesh.gradient_faces[i][f];

            // eigen values as per Zhaoli guo(2004) - preconditioning
            //method as per Jiri Blasek: CFD Principles and Application Determination of Max time Step
            // need to calulate correct direction of face vector
            area_x = area_x + fabs(Mesh.get_face_i(face)*Mesh.get_face_area(face));
            area_y = area_y + fabs(Mesh.get_face_j(face)*Mesh.get_face_area(face));
            area_z = area_z + fabs(Mesh.get_face_k(face)*Mesh.get_face_area(face));
        }

        // NOTE(review): all three directional areas are pushed through
        // add_u, so the y/z contributions land on the u component and v/w
        // are never populated. If Solution stores per-direction areas, the
        // second and third calls presumably should be add_v / add_w —
        // TODO confirm against Solution's interface and the time-step code
        // that consumes cfl_areas.
        cfl_areas.add_u(i, area_x / 2);
        cfl_areas.add_u(i, area_y / 2);
        cfl_areas.add_u(i, area_z / 2);
    }
    return;
}
void gpu_solver::General_Purpose_Solver_mk_i(unstructured_mesh &Mesh, Solution &soln, Boundary_Conditions &bcs,
external_forces &source, global_variables &globals, domain_geometry &domain,
initial_conditions &init_conds, unstructured_bcs &quad_bcs_orig, int mg,
Solution &residual, int fmg, post_processing &pp, std::vector<lagrangian_object> &object_vec)
{
///Declarations
RungeKutta rk4;
Solution residual_worker(Mesh.get_total_cells()); // stores residuals
Solution vortex_error(Mesh.get_total_cells());
Solution real_error(Mesh.get_total_cells());
Solution wall_shear_stress(Mesh.get_n_wall_cells());
gradients grads(Mesh.get_total_cells());
Solution cfl_areas(Mesh.get_total_cells());
/// Declarations and initialisations
flux_var RK;
double4 *temp_soln, *soln_t0, *soln_t1;
double *force_x, *force_y, *force_z;
//mesh related GPU variables
double3 *d_cfl_areas;
double3 *cell_centroid;
double3 *face_normal;
double3 *face_centroid;
double *cell_volume;
double *surface_area;
int* gradient_stencil;
int* mesh_owner;
int* mesh_neighbour;
double *streaming_dt;
//residual related GPU variables
double *res_rho, *res_u, *res_v, *res_w;
///gradient related GPU variables
double3 *RHS_arr;
double3 *grad_rho_arr;
double3 *grad_u_arr;
double3 *grad_v_arr;
double3 *grad_w_arr;
double4 *res_face;
double *LHS_xx;
double *LHS_xy;
double *LHS_xz;
double *LHS_yx;
double *LHS_yy;
double *LHS_yz;
double *LHS_zx;
double *LHS_zy;
double *LHS_zz;
//bcs related GPU variables
double4 *bcs_arr;
int* bcs_rho_type;
int* bcs_vel_type;
double4* cell_flux_arr;
double delta_t = globals.time_marching_step;
double *d_delta_t_local;
double *local_fneq;
double * delta_t_local;
int *delta_t_frequency;
/// assign memory
{
delta_t_local = new double[Mesh.get_n_cells()];
if (delta_t_local == NULL) exit(1);
delta_t_frequency = new int[Mesh.get_n_cells()];
if (delta_t_frequency == NULL) exit(1);
temp_soln = new double4[Mesh.get_total_cells()];
if (temp_soln == NULL) exit(1);
soln_t0 = new double4[Mesh.get_total_cells()];
if (soln_t0 == NULL) exit(1);
soln_t1 = new double4[Mesh.get_total_cells()];
if (soln_t1 == NULL) exit(1);
d_delta_t_local = new double[Mesh.get_n_cells()];
if (d_delta_t_local == NULL) exit(1);
local_fneq = new double[Mesh.get_total_cells()];
if (local_fneq == NULL) exit(1);
force_x = new double[Mesh.get_n_cells()];
if (force_x == NULL) exit(1);
force_y = new double[Mesh.get_n_cells()];
if (force_y == NULL) exit(1);
force_z = new double[Mesh.get_n_cells()];
if (force_z == NULL) exit(1);
res_rho = new double[Mesh.get_n_cells()];
if (res_rho == NULL) exit(1);
res_u = new double[Mesh.get_n_cells()];
if (res_u == NULL) exit(1);
res_v = new double[Mesh.get_n_cells()];
if (res_v == NULL) exit(1);
res_w = new double[Mesh.get_n_cells()];
if (res_w == NULL) exit(1);
res_face = new double4[Mesh.get_n_faces()];
if (res_face == NULL) exit(1);
//Mesh related allocations
cell_volume = new double[Mesh.get_total_cells()];
if (cell_volume == NULL) exit(1);
surface_area = new double[Mesh.get_n_faces()];
if (surface_area == NULL) exit(1);
gradient_stencil = new int[Mesh.get_n_cells() * 6];
if (gradient_stencil == NULL) exit(1);
mesh_owner = new int[Mesh.get_n_faces()];
if (mesh_owner == NULL) exit(1);
mesh_neighbour = new int[Mesh.get_n_faces()];
if (mesh_neighbour == NULL) exit(1);
d_cfl_areas = new double3[Mesh.get_total_cells()];
if (d_cfl_areas == NULL) exit(1);
cell_centroid = new double3[Mesh.get_total_cells()];
if (cell_centroid == NULL) exit(1);
face_centroid = new double3[Mesh.get_n_faces()];
if (face_centroid == NULL) exit(1);
face_normal = new double3[Mesh.get_n_faces()];
if (face_normal == NULL) exit(1);
streaming_dt = new double[Mesh.get_total_cells()];
if (streaming_dt == NULL) exit(1);
cell_flux_arr = new double4[Mesh.get_n_faces()];
if (cell_flux_arr == NULL) exit(1);
//bcs related GPU variables
bcs_arr = new double4[Mesh.get_num_bc()];
if (bcs_arr == NULL) exit(1);
bcs_rho_type = new int[Mesh.get_num_bc()];
if (bcs_rho_type == NULL) exit(1);
bcs_vel_type = new int[Mesh.get_num_bc()];
if (bcs_vel_type == NULL) exit(1);
//Gradient related allocations
RHS_arr = new double3[Mesh.get_n_cells() * 6];
if (RHS_arr == NULL) exit(1);
grad_rho_arr = new double3[Mesh.get_total_cells()];
if (grad_rho_arr == NULL) exit(1);
grad_u_arr = new double3[Mesh.get_total_cells()];
if (grad_u_arr == NULL) exit(1);
grad_v_arr = new double3[Mesh.get_total_cells()];
if (grad_v_arr == NULL) exit(1);
grad_w_arr = new double3[Mesh.get_total_cells()];
if (grad_w_arr == NULL) exit(1);
LHS_xx = new double[Mesh.get_n_cells()];
if (LHS_xx == NULL) exit(1);
LHS_xy = new double[Mesh.get_n_cells()];
if (LHS_xy == NULL) exit(1);
LHS_xz = new double[Mesh.get_n_cells()];
if (LHS_xz == NULL) exit(1);
LHS_yx = new double[Mesh.get_n_cells()];
if (LHS_yx == NULL) exit(1);
LHS_yy = new double[Mesh.get_n_cells()];
if (LHS_yy == NULL) exit(1);
LHS_yz = new double[Mesh.get_n_cells()];
if (LHS_yz == NULL) exit(1);
LHS_zx = new double[Mesh.get_n_cells()];
if (LHS_zx == NULL) exit(1);
LHS_zy = new double[Mesh.get_n_cells()];
if (LHS_zy == NULL) exit(1);
LHS_zz = new double[Mesh.get_n_cells()];
if (LHS_zz == NULL) exit(1);
}
//lagrangian object allocations
#if defined IMMERSED_BOUNDARY_METHOD
// first get total of object nodes for all cells
//loop through vector
int total_object_nodes = 0;
int total_object_springs = 0;
int total_object_tets = 0;
for (int i = 0; i < object_vec.size(); i++) {
total_object_nodes = total_object_nodes + object_vec[i].num_nodes;
total_object_springs = total_object_springs + object_vec[i].num_springs;
total_object_tets = total_object_tets + object_vec[i].num_tets;
}
double * object_x_ref, *object_y_ref, *object_z_ref;
double * object_x, *object_y, *object_z;
double * object_x0, *object_y0, *object_z0;
double * object_vel_x, *object_vel_y, *object_vel_z;
double * object_force_x, *object_force_y, *object_force_z;
int * object_tet_connectivity;
object_tet_connectivity = new int[total_object_tets *3];
if (object_tet_connectivity == NULL) exit(1);
object_x_ref = new double[total_object_nodes];
if (object_x_ref == NULL) exit(1);
object_y_ref = new double[total_object_nodes];
if (object_y_ref == NULL) exit(1);
object_z_ref = new double[total_object_nodes];
if (object_z_ref == NULL) exit(1);
object_x = new double[total_object_nodes];
if (object_x == NULL) exit(1);
object_y = new double[total_object_nodes];
if (object_y == NULL) exit(1);
object_z = new double[total_object_nodes];
if (object_z == NULL) exit(1);
object_x0 = new double[total_object_nodes];
if (object_x0 == NULL) exit(1);
object_y0 = new double[total_object_nodes];
if (object_y0 == NULL) exit(1);
object_z0 = new double[total_object_nodes];
if (object_z0 == NULL) exit(1);
object_vel_x = new double[total_object_nodes];
if (object_vel_x == NULL) exit(1);
object_vel_y = new double[total_object_nodes];
if (object_vel_y == NULL) exit(1);
object_vel_z = new double[total_object_nodes];
if (object_vel_z == NULL) exit(1);
object_force_x = new double[total_object_nodes];
if (object_force_x == NULL) exit(1);
object_force_y = new double[total_object_nodes];
if (object_force_y == NULL) exit(1);
object_force_z = new double[total_object_nodes];
if (object_force_z == NULL) exit(1);
#endif
double local_tolerance;
double3 mesh_lengths, mesh_origin;
mesh_lengths.x = domain.X;
mesh_lengths.y = domain.Y;
mesh_lengths.z = domain.Z;
mesh_origin.x = domain.origin_x;
mesh_origin.y = domain.origin_y;
mesh_origin.z = domain.origin_z;
double* h_lattice_weight;
h_lattice_weight = new double[15];
if (h_lattice_weight == NULL) exit(1);
double time;
double output_residual_threshold = 0;
double visc;
double angular_freq, wom_cos, force;
double td; // taylor vortex decay time
double drag_t1; //drag co-efficients
std::ofstream error_output, vortex_output, max_u, debug_log;
std::string output_dir, decay_dir, max_u_dir;
output_dir = globals.output_file + "/error.txt";
vector_var cell_1, cell_2, interface_node, lattice_node, delta_u, delta_v, delta_w, delta_rho;
vector_var relative_interface;
vector_var vel_lattice, rho_u_interface, u_interface;
vector_var delta_u1, delta_v1, delta_w1, delta_rho1;
vector_var cell_normal;
vector_var flux_e_alpha[9];
vector_var u, v, w, rho;
std::vector<vector_var> e_alpha;
std::vector<int> cell_nodes;
// vector_var flux_e_alpha;
residuals convergence_residual;
flux_var x_flux, y_flux, z_flux;
flux_var cell_flux;
flux_var debug[4], debug_flux[4], arti_debug[4];
flux_var dbug[4];
flux_var int_debug[4];
int timesteps;
int wall = 0;
tecplot_output<double> tecplot;
///Initialisations
dt = domain.dt; // timestepping for streaming // non-dim equals 1
c = 1; // assume lattice spacing is equal to streaming timestep
cs = c / sqrt(3);
visc = (globals.tau - 0.5) / 3 * domain.dt;
local_tolerance = globals.tolerance;
delta_t = globals.time_marching_step;
timesteps = ceil(globals.simulation_length);
output_dir = globals.output_file + "/error.txt";
decay_dir = globals.output_file + "/vortex_error.txt";
max_u_dir = globals.output_file + "/max_u.txt";
// error_output.open("/home/brendan/Dropbox/PhD/Test Cases/Couette Flow/error.txt", ios::out);
error_output.open(output_dir.c_str(), ios::out);
output_dir = globals.output_file + "/residual_log.txt";
debug_log.open(output_dir.c_str(), ios::out);
vortex_output.open(decay_dir.c_str(), ios::out);
max_u.open(max_u_dir.c_str(), ios::out);
time = 0;
angular_freq = visc * pow(globals.womersley_no, 2) / pow(Mesh.get_Y() / 2, 2);
force = -init_conds.pressure_gradient;
time = 0;
td = 100000000000000000;
grads.pre_fill_LHS_and_RHS_matrix(bcs, Mesh, domain, soln, globals);
populate_cfl_areas(cfl_areas, Mesh);
debug_log << "t,rk,i,res_rho,res_u,res_v,res_w,x,y,z, dt,visc,rho,u,v,ux,uy,uz,vx,vy,vz" << endl;
/// CUDA checks***********************************//////////////////////
hipDeviceProp_t deviceProp;
int argc;
const char *argv = " ";
/*int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_SUCCESS);
}*/
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
// Statistics about the GPU device
printf(
"> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
checkCudaErrors(hipSetDevice(0));
// num bloacks for different gpu kernels
int blockSize = 256;
int numBlocks = (Mesh.get_total_cells() + blockSize - 1) / blockSize;
int n_Cell_Blocks = (Mesh.get_n_cells() + blockSize - 1) / blockSize;
int n_bc_Blocks = (Mesh.get_num_bc() + blockSize - 1) / blockSize;
int n_face_Blocks = (Mesh.get_n_faces() + blockSize - 1) / blockSize;
#if defined IMMERSED_BOUNDARY_METHOD
int n_node_Blocks = (total_object_nodes + blockSize - 1) / blockSize;
#endif
double delta_x = domain.dt * 2;
double4 convergence;
convergence.w = 100000000000;
double *res_rho_block;
res_rho_block = new double[n_Cell_Blocks];
double *res_u_block;
res_u_block = new double[n_Cell_Blocks];
double *res_v_block;
res_v_block = new double[n_Cell_Blocks];
double *res_w_block;
res_w_block = new double[n_Cell_Blocks];
//arrrays for CUDA
{
checkCudaErrors(hipMallocManaged(&res_rho_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_u_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_v_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_w_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(hipMallocManaged(&d_delta_t_local, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&d_cfl_areas, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&temp_soln, Mesh.get_total_cells() * sizeof(double4)));
checkCudaErrors(hipMallocManaged(&soln_t0, Mesh.get_total_cells() * sizeof(double4)));
checkCudaErrors(hipMallocManaged(&soln_t1, Mesh.get_total_cells() * sizeof(double4)));
checkCudaErrors(hipMallocManaged(&cell_volume, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&gradient_stencil, Mesh.get_n_cells() * sizeof(int) * 6));
checkCudaErrors(hipMallocManaged(&mesh_owner, Mesh.get_n_faces() * sizeof(int)));
checkCudaErrors(hipMallocManaged(&mesh_neighbour, Mesh.get_n_faces() * sizeof(int)));
checkCudaErrors(hipMallocManaged(&cell_centroid, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&face_centroid, Mesh.get_n_faces() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&face_normal, Mesh.get_n_faces() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&surface_area, Mesh.get_n_faces() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&streaming_dt, Mesh.get_n_faces() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&cell_flux_arr, Mesh.get_n_faces() * sizeof(double4)));
checkCudaErrors(hipMallocManaged(&force_x, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&force_y, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&force_z, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_rho, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_u, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_v, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_w, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&res_face, Mesh.get_n_faces() * sizeof(double4)));
checkCudaErrors(hipMallocManaged(&local_fneq, Mesh.get_total_cells() * sizeof(double)));
//arrays for bcs
checkCudaErrors(hipMallocManaged(&bcs_arr, Mesh.get_num_bc() * sizeof(double4)));
checkCudaErrors(hipMallocManaged(&bcs_rho_type, Mesh.get_num_bc() * sizeof(int)));
checkCudaErrors(hipMallocManaged(&bcs_vel_type, Mesh.get_num_bc() * sizeof(int)));
//arrrays for CUDA Gradient
checkCudaErrors(hipMallocManaged(&grad_rho_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&grad_u_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&grad_v_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&grad_w_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(hipMallocManaged(&RHS_arr, Mesh.get_n_cells() * sizeof(double3) * 6));
checkCudaErrors(hipMallocManaged(&LHS_xx, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_xy, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_xz, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_yx, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_yy, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_yz, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_zx, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_zy, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(hipMallocManaged(&LHS_zz, Mesh.get_n_cells() * sizeof(double)));
#if defined IMMERSED_BOUNDARY_METHOD
//arrays for lagrangian objects
checkCudaErrors(hipMallocManaged(&object_tet_connectivity, total_object_tets * 3 * sizeof(int)));
checkCudaErrors(hipMallocManaged(&object_x_ref, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_y_ref, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_z_ref, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_x, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_y, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_z, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_x0, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_y0, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_z0, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_vel_x, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_vel_y, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_vel_z, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_force_x, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_force_y, total_object_nodes * sizeof(double)));
checkCudaErrors(hipMallocManaged(&object_force_z, total_object_nodes * sizeof(double)));
#endif
}
populate_e_alpha(e_alpha, h_lattice_weight, c, globals.PI, 15);
checkCudaErrors(hipMemcpyToSymbol(lattice_weight, h_lattice_weight, 15 * sizeof(double)));
/// Sync before CUDA array used
hipDeviceSynchronize();
populate_cfl_areas(d_cfl_areas, Mesh);
// transfer class members to arrays for CUDA
{
soln_to_double(temp_soln, soln, Mesh.get_total_cells());
mesh_to_array(cell_volume, Mesh, Mesh.get_total_cells(), "volume");
mesh_to_array(gradient_stencil, Mesh, Mesh.get_n_cells(), "gradient_stencil");
mesh_to_array(mesh_owner, Mesh, Mesh.get_n_faces(), "mesh_owner");
mesh_to_array(mesh_neighbour, Mesh, Mesh.get_n_faces(), "mesh_neighbour");
mesh_to_array(surface_area, Mesh, Mesh.get_n_faces(), "surface_area");
mesh_to_array(streaming_dt, Mesh, Mesh.get_n_faces(), "streaming_dt");
mesh_to_array_double(face_normal, Mesh, Mesh.get_n_faces(), "face_normal");
mesh_to_array_double(cell_centroid, Mesh, Mesh.get_total_cells(), "cell_centroid");
mesh_to_array_double(face_centroid, Mesh, Mesh.get_n_faces(), "face_centroid");
gradients_to_array(LHS_xx, grads, Mesh.get_n_cells(), "LHS_xx");
gradients_to_array(LHS_xy, grads, Mesh.get_n_cells(), "LHS_xy");
gradients_to_array(LHS_xz, grads, Mesh.get_n_cells(), "LHS_xz");
gradients_to_array(LHS_yx, grads, Mesh.get_n_cells(), "LHS_yx");
gradients_to_array(LHS_yy, grads, Mesh.get_n_cells(), "LHS_yy");
gradients_to_array(LHS_yz, grads, Mesh.get_n_cells(), "LHS_yz");
gradients_to_array(LHS_zx, grads, Mesh.get_n_cells(), "LHS_zx");
gradients_to_array(LHS_zy, grads, Mesh.get_n_cells(), "LHS_zy");
gradients_to_array(LHS_zz, grads, Mesh.get_n_cells(), "LHS_zz");
gradients_to_array_double(RHS_arr, grads, Mesh.get_n_cells(), "RHS_array");
bcs_to_array_double(bcs_arr, bcs, Mesh.get_num_bc(), "bcs");
bcs_to_array(bcs_rho_type, bcs, Mesh.get_num_bc(), "rho_type");
bcs_to_array(bcs_vel_type, bcs, Mesh.get_num_bc(), "vel_type");
}
#if defined IMMERSED_BOUNDARY_METHOD
lagrangian_object_to_array(object_vec, object_x_ref, object_y_ref, object_z_ref, object_x, object_y, object_z, object_x0, object_y0, object_z0, object_tet_connectivity);
#endif
hipProfilerStart();
clone_a_to_b << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), temp_soln, soln_t1); // soln_t0 holds macro variable solution at start of time step
// loop in time
for (int t = 0; t < timesteps; t++) {
// soln_t0 is the solution at the start of every
// RK step.(rk = n) Temp_soln holds the values at end of
// step.(rk = n+1)
clone_a_to_b << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), soln_t1, soln_t0);// soln_t0 holds macro variable solution at start of time step
post_kernel_checks();
//womersley flow peculiarities
if (globals.testcase == 4) {
wom_cos = cos(angular_freq * t * delta_t);
force = -init_conds.pressure_gradient * wom_cos;
}
//local timestepping calculation
// can be removed for uniform grids and replaced with a single calc
hipLaunchKernelGGL(( get_cfl_device) , dim3(n_Cell_Blocks), dim3(blockSize) , 0, 0, Mesh.get_n_cells(), temp_soln, cell_volume, d_delta_t_local, d_cfl_areas, globals.time_marching_step,
globals.max_velocity,globals.pre_conditioned_gamma, globals.visc, globals.gpu_time_stepping);
post_kernel_checks();
#if defined IMMERSED_BOUNDARY_METHOD
// need to propogate node position based on final RK4 velocity
interpolate_velocities_on_nodes << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, temp_soln, Mesh.get_n_cells());
/* interpolate_velocities_on_nodes_cos_kernel << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, temp_soln, globals.PI);*/
update_node_positions_rk4 << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, delta_t, object_vec[0].num_nodes,
object_x0, object_y0, object_z0);
/*fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_x);*/
fill_double << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_x, init_conds.pressure_gradient);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_y);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_z);
post_kernel_checks();
//for now assume uniform stiffness, radius etc.
update_node_forces << < n_node_Blocks, blockSize >> > (total_object_nodes, object_force_x, object_force_y, object_force_z, object_x, object_y, object_z, object_x_ref, object_y_ref, object_z_ref,
object_vec[0].stiffness, object_vec[0].radius, globals.PI, object_vec[0].num_nodes, object_vel_x, object_vel_y, object_vel_z, delta_t, object_vec[0].depth);
//assume uniform grid for now, need moving least squares stencil in the future
spread_forces_on_structured_grid << < n_node_Blocks, blockSize >> > (total_object_nodes, object_force_x, object_force_y, object_force_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, force_x, force_y, force_z, Mesh.get_n_cells());
/*spread_forces_on_structured_grid_cos_kernel << < n_node_Blocks, blockSize >> > (total_object_nodes, object_force_x, object_force_y, object_force_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, force_x, force_y, force_z, Mesh.get_n_cells(), globals.PI);*/
#endif
for (int rk = 0; rk < rk4.timesteps; rk++) {
drag_t1 = 0.0;
//update temp_soln boundary conditions
update_unstructured_bcs << < n_bc_Blocks, blockSize >> > (Mesh.get_num_bc(), Mesh.get_n_neighbours(), Mesh.get_n_cells(), mesh_owner, bcs_rho_type, bcs_vel_type, temp_soln, bcs_arr, cell_centroid,domain.Y);
//set to zeros
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_rho);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_u);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_v);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_w);
post_kernel_checks();
// time2 = clock();
hipLaunchKernelGGL(( get_interior_gradients) , dim3(n_Cell_Blocks), dim3(blockSize) , 0, 0, Mesh.get_n_cells(), gradient_stencil, temp_soln,
RHS_arr,LHS_xx, LHS_xy, LHS_xz,LHS_yx, LHS_yy, LHS_yz,LHS_zx, LHS_zy, LHS_zz,
grad_rho_arr, grad_u_arr, grad_v_arr, grad_w_arr);
post_kernel_checks();
//get boundary condition gradients
get_bc_gradients << < n_bc_Blocks, blockSize >> > (Mesh.get_num_bc(), Mesh.get_n_neighbours(), Mesh.get_n_cells(), mesh_owner, bcs_rho_type, bcs_vel_type, temp_soln,
face_normal, cell_centroid, bcs_arr,
grad_rho_arr, grad_u_arr, grad_v_arr, grad_w_arr);
post_kernel_checks();
//time3 = clock();
//std::cout << "CPU Cycles Gradients:" << double(time3 - time2) << std::endl;
wall = 0;
// loop through each cell and exclude the ghost cells
//using n_cells here rather than total_cells
hipDeviceSynchronize();
calc_face_flux << < n_face_Blocks, blockSize >> > (Mesh.get_n_faces(), temp_soln, cell_volume, surface_area, mesh_owner, mesh_neighbour, cell_centroid, face_centroid, face_normal,
streaming_dt, grad_rho_arr, grad_u_arr, grad_v_arr, grad_w_arr, Mesh.get_n_cells(), (1/ globals.pre_conditioned_gamma), local_fneq, globals.visc,
res_rho, res_u,res_v,res_w,res_face,
bcs_rho_type, bcs_vel_type, bcs_arr,globals.PI);
post_kernel_checks();
hipDeviceSynchronize();
add << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_u, force_x);
add << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_v, force_y);
add << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_w, force_z);
post_kernel_checks();
hipDeviceSynchronize();
/*
for (int i = 0; i < Mesh.get_n_cells(); i++) {
debug_log << t << ", " << rk << ", " << i << ", " << res_rho[i] << ", " <<
res_u[i] << ", " << res_v[i] << ", " << res_w[i]
<< ", " <<
Mesh.get_centroid_x(i) << " , " << Mesh.get_centroid_y(i) << "," << Mesh.get_centroid_z(i) << "," <<
delta_t_local[i] << " , " << local_fneq[i] << "," <<
soln.get_rho(i) << "," << soln.get_u(i) << " , " << soln.get_v(i) << " , " <<
grad_u_arr[i].x << " , " << grad_u_arr[i].y << " , " << grad_u_arr[i].z << " , " <<
grad_v_arr[i].x << " , " << grad_v_arr[i].y << " , " << grad_v_arr[i].z << " , " <<
grad_w_arr[i].x << " , " << grad_w_arr[i].y << "," << grad_w_arr[i].z
<< endl;
}*/
//Update solutions //update RK values
time_integration << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), rk, rk4.timesteps, d_delta_t_local, soln_t0, soln_t1, temp_soln,
res_rho, res_u, res_v, res_w);
post_kernel_checks();
#if defined IMMERSED_BOUNDARY_METHOD
//// //for now assume uniform stiffness, radius etc.
//interpolate_velocities_on_nodes << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
// mesh_origin, mesh_lengths, delta_x, temp_soln);
///*interpolate_velocities_on_nodes_cos_kernel << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
// mesh_origin, mesh_lengths, delta_x, temp_soln,globals.PI);*/
//update_node_positions << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, delta_t, object_vec[0].num_nodes,rk);
#endif
}
//get square of residuals
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_rho);
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_u);
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_v);
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_w);
//reduce add residuals
total<256> << < n_Cell_Blocks, blockSize >> > (res_rho, res_rho_block, Mesh.get_n_cells());
total<256> << < n_Cell_Blocks, blockSize >> > (res_u, res_u_block, Mesh.get_n_cells());
total<256> << < n_Cell_Blocks, blockSize >> > (res_v, res_v_block, Mesh.get_n_cells());
total<256> << < n_Cell_Blocks, blockSize >> > (res_w, res_w_block, Mesh.get_n_cells());
post_kernel_checks();
hipDeviceSynchronize();
convergence_residual.reset();
convergence_residual.l2_norm_rms_moukallad(globals, res_rho_block, res_u_block, res_v_block, res_w_block, n_Cell_Blocks, Mesh.get_n_cells());
/*
calc_total_residual << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), convergence, res_rho, res_u, res_v, res_w);
post_kernel_checks();*/
/*check_error << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), temp_soln);
post_kernel_checks();*/
//convergence_residual.ansys_5_iter_rms(t);
time = t * delta_t;
if (mg == 0 && t%globals.output_step == 1) {
soln.clone(temp_soln);
error_output << t << ", " << convergence_residual.max_error() << ", " <<
convergence_residual.rho_rms << ", " << convergence_residual.u_rms << ", " <<
convergence_residual.v_rms << ", " <<
convergence_residual.w_rms << " , FMG cycle: " << fmg << endl;
cout << "time t=" << time << " error e =" << convergence_residual.max_error()
<< " delta_t:" << delta_t << std::endl;
//max_u << t << "," << soln.get_u(center_node) << "," << force << endl;
cout << "drag: " << drag_t1 << endl;
//only output at decreasing order of magnitudes - save space on hard drive
if (convergence_residual.max_error() < pow(10, output_residual_threshold)) {
tecplot.tecplot_output_unstructured_soln(globals, Mesh, soln, bcs, t, pp, residual_worker, delta_t_local, local_fneq);
tecplot.tecplot_output_lagrangian_object_gpu(object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, object_force_x, object_force_y, object_force_z, globals, domain, t
,object_vec[0].name, object_vec[0].num_nodes, object_vec[0].depth_nodes, object_vec[0].radial_nodes);
output_residual_threshold = output_residual_threshold - 1;
soln.output(globals.output_file, globals, domain);
hipProfilerStop();
}
//soln.output_centrelines(globals.output_file,globals,Mesh,time);
}
if (convergence_residual.max_error() < local_tolerance || time > td) {
if (mg == 0) {
soln.clone(temp_soln);
cout << "convergence" << endl;
cout << "time t=" << time << " error e =" << convergence_residual.max_error()
<< " delta_t:" << delta_t << std::endl;
error_output.close();
debug_log.close();
vortex_output.close();
max_u.close();
// vortex calcs
soln.update_unstructured_bcs(bcs, Mesh, domain, t);
grads.Get_LS_Gradients(bcs, Mesh, domain, soln, globals);
pp.cylinder_post_processing(Mesh, globals, grads, bcs, soln, domain, wall_shear_stress);
// pp.calc_vorticity(x_gradients,y_gradients);
//pp.calc_streamfunction(Mesh,globals,bcs);
tecplot.tecplot_output_unstructured_soln(globals, Mesh, soln, bcs, t, pp, residual_worker, delta_t_local, local_fneq);
tecplot.tecplot_output_lagrangian_object_gpu(object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, object_force_x, object_force_y, object_force_z, globals, domain, timesteps
, object_vec[0].name, object_vec[0].num_nodes, object_vec[0].depth_nodes, object_vec[0].radial_nodes);
//soln.output_centrelines(globals.output_file,globals,Mesh,time);
}
hipProfilerStop();
return;
}
}
// pp.calc_vorticity(x_gradients,y_gradients);
//pp.calc_streamfunction(Mesh,globals,bcs);
hipProfilerStop();
soln.clone(temp_soln);
cout << "out of time" << endl;
error_output.close();
vortex_output.close();
debug_log.close();
max_u.close();
pp.cylinder_post_processing(Mesh, globals, grads, bcs, soln, domain, wall_shear_stress);
tecplot.tecplot_output_unstructured_soln(globals, Mesh, soln, bcs, timesteps, pp, residual_worker, delta_t_local, local_fneq);
tecplot.tecplot_output_lagrangian_object_gpu(object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, object_force_x, object_force_y, object_force_z, globals, domain, timesteps
, object_vec[0].name, object_vec[0].num_nodes, object_vec[0].depth_nodes, object_vec[0].radial_nodes);
}
void gpu_solver::get_weighted_average(gradients &grads, int i, int neighbour, double m1, double m2,
	vector_var &u, vector_var &v, vector_var &w, vector_var &rho, unstructured_mesh &mesh)
{
	// Blend the stored gradients of cell i and its neighbour into the output
	// vectors u/v/w/rho, using m1 (cell i) and m2 (neighbour) as weights.
	// NOTE(review): the boundary test below uses '>' — confirm whether ghost
	// cells start at index n_cells, in which case '>=' may have been intended.
	if (neighbour > mesh.get_n_cells()) {
		// Boundary/ghost cells hold gradients already evaluated at the cell
		// face, so they are copied through unweighted.
		u.set_equal(grads.get_u(neighbour).x, grads.get_u(neighbour).y, grads.get_u(neighbour).z);
		v.set_equal(grads.get_v(neighbour).x, grads.get_v(neighbour).y, grads.get_v(neighbour).z);
		w.set_equal(grads.get_w(neighbour).x, grads.get_w(neighbour).y, grads.get_w(neighbour).z);
		rho.set_equal(grads.get_rho(neighbour).x, grads.get_rho(neighbour).y, grads.get_rho(neighbour).z);
	}
	else {
		// Interior face: weighted average of the two cell-centred gradients.
		const double total = m1 + m2;
		const double w_own = m1 / total;   // weight applied to cell i
		const double w_nb = m2 / total;    // weight applied to the neighbour
		u.set_equal(grads.get_u(i).x * w_own + grads.get_u(neighbour).x * w_nb,
			grads.get_u(i).y * w_own + grads.get_u(neighbour).y * w_nb,
			grads.get_u(i).z * w_own + grads.get_u(neighbour).z * w_nb);
		v.set_equal(grads.get_v(i).x * w_own + grads.get_v(neighbour).x * w_nb,
			grads.get_v(i).y * w_own + grads.get_v(neighbour).y * w_nb,
			grads.get_v(i).z * w_own + grads.get_v(neighbour).z * w_nb);
		w.set_equal(grads.get_w(i).x * w_own + grads.get_w(neighbour).x * w_nb,
			grads.get_w(i).y * w_own + grads.get_w(neighbour).y * w_nb,
			grads.get_w(i).z * w_own + grads.get_w(neighbour).z * w_nb);
		rho.set_equal(grads.get_rho(i).x * w_own + grads.get_rho(neighbour).x * w_nb,
			grads.get_rho(i).y * w_own + grads.get_rho(neighbour).y * w_nb,
			grads.get_rho(i).z * w_own + grads.get_rho(neighbour).z * w_nb);
	}
}
vector_var gpu_solver::get_e_alpha(int k, double &lattice_weight, double c, double PI) {
	// Reconstruct the lattice velocity e_alpha for discrete direction k of a
	// 2D (D2Q9-style) set, reporting the matching quadrature weight through
	// the lattice_weight out-parameter.
	//   k == 0      : rest particle           (weight 4/9)
	//   k in [1,4]  : axis-aligned directions (weight 1/9)
	//   k >= 5      : diagonal directions     (weight 1/36)
	int ex = 0, ey = 0, ez = 0;   // z stays 0 — update in 3D
	if (k > 0 && k < 5) {
		ex = round(cos((k - 1)*PI / 2) * c);
		ey = round(sin((k - 1)*PI / 2)* c);
		lattice_weight = 1.0 / 9.0;
	}
	else if (k > 4) {
		// Diagonals have magnitude sqrt(2)*c, rotated 45 degrees off-axis.
		ex = round(sqrt(2) * cos((k - 5)*PI / 2 + PI / 4) * c);
		ey = round(sqrt(2) * sin((k - 5)*PI / 2 + PI / 4) * c);
		lattice_weight = 1.0 / 36.0;
	}
	else {
		lattice_weight = 4.0 / 9.0;
	}
	vector_var temp;
	temp.x = ex;
	temp.y = ey;
	temp.z = ez;
	return temp;
}
void gpu_solver::populate_e_alpha(vector<vector_var> &e_alpha, double *lattice_weight, double c, double PI, int j) {
	// Populate the first j entries of the D3Q15 lattice velocity set and the
	// corresponding quadrature weights:
	//   index 0      : rest particle     (weight 2/9)
	//   indices 1-6  : face neighbours   (weight 1/9)
	//   indices 7-14 : corner neighbours (weight 1/72)
	// c and PI are kept for interface compatibility; the velocity set is
	// tabulated directly below rather than computed trigonometrically.
	static const int ex[15] = { 0,1,-1,0,0,0,0,1,-1, 1,-1,1,-1,-1,1 };
	static const int ey[15] = { 0,0,0,1,-1,0,0,1,-1,1,-1,-1,1,1,-1 };
	static const int ez[15] = { 0,0,0,0,0,1,-1,1,-1,-1,1,1,-1,1,-1 };
	for (int k = 0; k < j; k++) {
		if (k == 0) {
			lattice_weight[k] = 2.0 / 9.0;
		}
		else if (k < 7) {
			lattice_weight[k] = 1.0 / 9.0;
		}
		else {
			lattice_weight[k] = 1.0 / 72.0;
		}
		vector_var temp;
		temp.x = ex[k];
		temp.y = ey[k];
		temp.z = ez[k];
		e_alpha.push_back(temp);
	}
}
void gpu_solver::get_cell_gradients(Mesh &Mesh, int i, int neighbour, int j, Solution &temp_soln,
	vector_var &delta_rho, vector_var &delta_rho1,
	vector_var &delta_u, vector_var &delta_u1,
	vector_var &delta_v, vector_var &delta_v1,
	Boundary_Conditions &bcs) {
	// Compute gradients of rho, u and v for cell i (delta_*) and for its
	// neighbour across face j (delta_*1), each from the values at the two
	// cells straddling the centre cell.  j == 2 selects the west/east pair;
	// any other j selects the south/north pair.  bcs is unused here but kept
	// for interface compatibility.
	auto stencil_gradients = [&](int centre, vector_var &d_rho, vector_var &d_u, vector_var &d_v) {
		int lo, hi;
		if (j == 2) {
			lo = Mesh.get_w_node(centre);
			hi = Mesh.get_e_node(centre);
		}
		else {
			lo = Mesh.get_s_node(centre);
			hi = Mesh.get_n_node(centre);
		}
		// Centroids of the two stencil cells define the gradient baseline.
		vector_var c_lo, c_hi;
		Mesh.get_centroid(lo, c_lo);
		Mesh.get_centroid(hi, c_hi);
		d_rho.Get_Gradient(temp_soln.get_rho(lo), temp_soln.get_rho(hi), c_lo, c_hi);
		d_u.Get_Gradient(temp_soln.get_u(lo), temp_soln.get_u(hi), c_lo, c_hi);
		d_v.Get_Gradient(temp_soln.get_v(lo), temp_soln.get_v(hi), c_lo, c_hi);
	};
	stencil_gradients(i, delta_rho, delta_u, delta_v);             // gradients at cell i
	stencil_gradients(neighbour, delta_rho1, delta_u1, delta_v1); // gradients at the neighbour
}
void gpu_solver::cell_interface_variables(int j, int i, vector_var &interface_node, int &neighbour, double &interface_area,
	vector_var &cell_normal, Boundary_Conditions &boundary_conditions, bc_var &bc,
	Mesh &Mesh, vector_var &cell_2) {

	// Loads the geometry of face j of structured cell i: face midpoint,
	// neighbouring cell id, face area and outward normal.
	// j: 0 = west, 1 = south, 2 = east, 3 = north, 4 = front, 5 = back.
	// Outputs are untouched for any other j value.
	if (j == 0) {
		// west face
		neighbour = Mesh.get_w_node(i);
		interface_area = Mesh.get_w_area(i);
		interface_node.x = Mesh.get_west_x(i);
		interface_node.y = Mesh.get_west_y(i);
		interface_node.z = Mesh.get_west_z(i);
		cell_normal.x = Mesh.get_w_i(i);
		cell_normal.y = Mesh.get_w_j(i);
		cell_normal.z = Mesh.get_w_k(i);
	}
	else if (j == 1) {
		// south face
		neighbour = Mesh.get_s_node(i);
		interface_area = Mesh.get_s_area(i);
		interface_node.x = Mesh.get_south_x(i);
		interface_node.y = Mesh.get_south_y(i);
		interface_node.z = Mesh.get_south_z(i);
		cell_normal.x = Mesh.get_s_i(i);
		cell_normal.y = Mesh.get_s_j(i);
		cell_normal.z = Mesh.get_s_k(i);
	}
	else if (j == 2) {
		// east face
		neighbour = Mesh.get_e_node(i);
		interface_area = Mesh.get_e_area(i);
		interface_node.x = Mesh.get_east_x(i);
		interface_node.y = Mesh.get_east_y(i);
		interface_node.z = Mesh.get_east_z(i);
		cell_normal.x = Mesh.get_e_i(i);
		cell_normal.y = Mesh.get_e_j(i);
		cell_normal.z = Mesh.get_e_k(i);
	}
	else if (j == 3) {
		// north face
		neighbour = Mesh.get_n_node(i);
		interface_area = Mesh.get_n_area(i);
		interface_node.x = Mesh.get_north_x(i);
		interface_node.y = Mesh.get_north_y(i);
		interface_node.z = Mesh.get_north_z(i);
		cell_normal.x = Mesh.get_n_i(i);
		cell_normal.y = Mesh.get_n_j(i);
		cell_normal.z = Mesh.get_n_k(i);
	}
	else if (j == 4) {
		// front face
		neighbour = Mesh.get_f_node(i);
		interface_area = Mesh.get_f_area(i);
		interface_node.x = Mesh.get_front_x(i);
		interface_node.y = Mesh.get_front_y(i);
		interface_node.z = Mesh.get_front_z(i);
		cell_normal.x = Mesh.get_f_i(i);
		cell_normal.y = Mesh.get_f_j(i);
		cell_normal.z = Mesh.get_f_k(i);
	}
	else if (j == 5) {
		// back face
		neighbour = Mesh.get_b_node(i);
		interface_area = Mesh.get_b_area(i);
		interface_node.x = Mesh.get_back_x(i);
		interface_node.y = Mesh.get_back_y(i);
		interface_node.z = Mesh.get_back_z(i);
		cell_normal.x = Mesh.get_b_i(i);
		cell_normal.y = Mesh.get_b_j(i);
		cell_normal.z = Mesh.get_b_k(i);
	}
	// cell_2 is deliberately left untouched in this overload.
}
void gpu_solver::cell_interface_variables(int face, int i, vector_var &interface_node, int &neighbour, double &interface_area,
	vector_var &cell_normal, Boundary_Conditions &boundary_conditions, bc_var &bc,
	unstructured_mesh &Mesh, vector_var &cell_2, vector_var &cell_1) {

	// Unstructured-mesh variant: look up the geometry of global face `face`
	// (midpoint, area, unit normal), the id of the cell on its far side, and
	// that neighbour's centroid (written to cell_2). cell_1 is not written.
	neighbour = Mesh.get_mesh_neighbour(face);
	interface_area = Mesh.get_face_area(face);

	interface_node.x = Mesh.get_face_x(face);
	interface_node.y = Mesh.get_face_y(face);
	interface_node.z = Mesh.get_face_z(face);

	cell_normal.x = Mesh.get_face_i(face);
	cell_normal.y = Mesh.get_face_j(face);
	cell_normal.z = Mesh.get_face_k(face);

	cell_2.x = Mesh.get_centroid_x(neighbour);
	cell_2.y = Mesh.get_centroid_y(neighbour);
	cell_2.z = Mesh.get_centroid_z(neighbour);
}
void gpu_solver::get_cell_nodes(std::vector<int> &cell_nodes, Boundary_Conditions &bcs, int neighbour,
	Mesh &Mesh, int i, int j) {

	// Builds the interpolation stencil for the face shared by cell i and
	// `neighbour`. Boundary faces use just the two face-adjacent cells;
	// interior faces also include the transverse neighbours of both cells
	// (north/south for an east-west face when j == 2, east/west otherwise).
	cell_nodes.clear();

	if (bcs.get_bc(i) || bcs.get_bc(neighbour)) {
		// boundary face: two-cell stencil only
		cell_nodes.push_back(i);
		cell_nodes.push_back(neighbour);
		return;
	}

	if (j == 2) {
		// east-west face: add the north/south neighbours of each side
		cell_nodes.push_back(i);
		cell_nodes.push_back(Mesh.get_n_node(i));
		cell_nodes.push_back(Mesh.get_s_node(i));
		cell_nodes.push_back(neighbour);
		cell_nodes.push_back(Mesh.get_n_node(neighbour));
		cell_nodes.push_back(Mesh.get_s_node(neighbour));
	}
	else {
		// north-south face: add the east/west neighbours of each side
		cell_nodes.push_back(i);
		cell_nodes.push_back(Mesh.get_e_node(i));
		cell_nodes.push_back(Mesh.get_w_node(i));
		cell_nodes.push_back(neighbour);
		cell_nodes.push_back(Mesh.get_e_node(neighbour));
		cell_nodes.push_back(Mesh.get_w_node(neighbour));
	}
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
void gpu_solver::populate_cfl_areas(double3 *cfl_areas, unstructured_mesh &Mesh) {

	// For each cell, sum |n_x|*A, |n_y|*A and |n_z|*A over the faces in its
	// gradient stencil and store half of each sum. These projected areas feed
	// the CFL-based time-step estimate (spectral radii per Blazek / Guo 2004).
	for (int i = 0; i < Mesh.get_n_cells(); i++) {
		double sum_x = 0.0;
		double sum_y = 0.0;
		double sum_z = 0.0;

		for (size_t f = 0; f < Mesh.gradient_faces[i].size(); f++) {
			int face = Mesh.gradient_faces[i][f];
			// project the face area onto each coordinate direction
			sum_x += fabs(Mesh.get_face_i(face) * Mesh.get_face_area(face));
			sum_y += fabs(Mesh.get_face_j(face) * Mesh.get_face_area(face));
			sum_z += fabs(Mesh.get_face_k(face) * Mesh.get_face_area(face));
		}

		double3 projected;
		projected.x = sum_x / 2;
		projected.y = sum_y / 2;
		projected.z = sum_z / 2;
		cfl_areas[i] = projected;
	}
	return;
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
void gpu_solver::inverse_weighted_distance_interpolation(double &u, double &v, double &rho, Boundary_Conditions &bcs,
	Mesh &Mesh, domain_geometry &domain, Solution &soln, vector_var &interface_node,
	int k, int i, int neighbour, vector<vector_var> &e_alpha, int j, std::vector<int> &cell_nodes) {

	// Inverse-distance-squared interpolation of u, v and rho at the lattice
	// departure point: the interface node traced back along direction
	// e_alpha[k] for one streaming time step. Results are written to the
	// output references u, v and rho.
	double w_u, w_v, w_rho, w_sum, w; // weighted accumulators and current weight
	w_u = 0.0;
	w_v = 0.0;
	w_rho = 0.0;
	w_sum = 0.0;

	double r;
	r = 0.0;
	double dt;

	// streaming time step depends on face orientation (j == 2 -> east face)
	if (j == 2) {
		dt = Mesh.get_delta_t_e(i);
	}
	else {
		dt = Mesh.get_delta_t_n(i);
	}

	// departure point: trace back from the interface along lattice direction k
	vector_var node_displacement, target_node;
	target_node.x = interface_node.x - e_alpha[k].x * dt;
	target_node.y = interface_node.y - e_alpha[k].y * dt;
	target_node.z = interface_node.z - e_alpha[k].z * dt;

	for (auto &it : cell_nodes) {
		node_displacement.x = Mesh.get_centroid_x(it) - target_node.x;
		node_displacement.y = Mesh.get_centroid_y(it) - target_node.y;
		node_displacement.z = Mesh.get_centroid_z(it) - target_node.z;

		r = node_displacement.Magnitude();

		// departure point coincides with a stencil centroid: take its values directly
		if (r < 10e-5) {
			u = soln.get_u(it);
			v = soln.get_v(it); // bug fix: was "w = soln.get_v(it)", which clobbered the weight and left output v unset
			rho = soln.get_rho(it);
			return;
		}
		// inverse-distance-squared weight for this cell centroid
		w = pow(1 / r, 2.0);

		// accumulate weighted cell-centre values
		w_u = w_u + w * soln.get_u(it);
		w_v = w_v + w * soln.get_v(it);
		w_rho = w_rho + w * soln.get_rho(it);
		w_sum = w_sum + w;
	}

	// normalise by the total weight to get the interpolated values
	u = w_u / w_sum;
	v = w_v / w_sum;
	rho = w_rho / w_sum;
}
void gpu_solver::find_real_time(double* delta_t_local, double* local_time, bool* calc_face,
	unstructured_mesh &Mesh, bool* calc_cell) {

	// Bookkeeping for temporally-adaptive local time stepping:
	// 1) advance the local clock of every cell updated in the last pass;
	// 2) allow a cell to update next pass only if no stencil neighbour lags behind it;
	// 3) evaluate a face whenever either adjacent cell is being updated.
	for (int c = 0; c < Mesh.get_total_cells(); c++) {
		if (calc_cell[c]) {
			local_time[c] = local_time[c] + delta_t_local[c];
		}
	}

	for (int c = 0; c < Mesh.get_total_cells(); c++) {
		calc_cell[c] = true;
		for (size_t g = 0; g < Mesh.gradient_cells[c].size(); g++) {
			int nb = Mesh.gradient_cells[c][g];
			if (local_time[c] > local_time[nb]) {
				// a neighbour is behind in time: hold this cell back
				calc_cell[c] = false;
				break;
			}
		}
	}

	for (int f = 0; f < Mesh.get_n_faces(); f++) {
		calc_face[f] = (calc_cell[Mesh.get_mesh_owner(f)] || calc_cell[Mesh.get_mesh_neighbour(f)]);
	}
}
void gpu_solver::post_kernel_checks() {
	// Block until all queued device work has finished, then check for any
	// launch or asynchronous execution error; report and abort on failure.
	hipDeviceSynchronize();
	hipError_t status = hipGetLastError();
	if (status == hipSuccess)
		return;
	printf("CUDA error: %s\n", hipGetErrorString(status));
	exit(-1);
}
template <typename T>
void gpu_solver::bcs_to_array(T* target, Boundary_Conditions &bcs, int total_nodes, std::string name) {
	// Flatten one integer boundary-condition field, selected by name
	// ("vel_type" or "rho_type"), into a raw array for GPU transfer.
	// Any other name leaves target untouched.
	for (int i = 0; i < total_nodes; i++) {
		if (name == "vel_type") {
			target[i] = bcs.get_vel_type(i);
		}
		else if (name == "rho_type") {
			target[i] = bcs.get_rho_type(i);
		}
	}
}
template <typename T>
void gpu_solver::bcs_to_array_double(T* target, Boundary_Conditions &bcs, int total_nodes, std::string name) {
	// Pack each boundary cell's prescribed velocity (x, y, z) and density (w)
	// into a double4 array for GPU transfer. Only name == "bcs" is handled.
	for (int i = 0; i < total_nodes; i++) {
		if (name == "bcs") {
			double4 packed;
			packed.x = bcs.get_u(i);
			packed.y = bcs.get_v(i);
			packed.z = bcs.get_w(i);
			packed.w = bcs.get_rho(i);
			target[i] = packed;
		}
	}
}
void gpu_solver::lagrangian_object_to_array( std::vector<lagrangian_object> &obj_vec, double* &x_ref, double* &y_ref, double* &z_ref, double* &x, double* &y, double* & z,
	double* &x0, double* &y0, double* & z0, int * & tet_connectivity) {

	// Flatten the node coordinates (reference, current, previous) and the tet
	// connectivity of every lagrangian object into contiguous arrays for the GPU.
	int n = 0;	// running node offset across all objects
	int t = 0;	// running tet-connectivity offset across all objects
	for (size_t i = 0; i < obj_vec.size(); i++) {
		for (int j = 0; j < obj_vec[i].num_nodes; j++) {
			x_ref[n] = obj_vec[i].node_x_ref[j];
			x[n] = obj_vec[i].node_x[j];
			x0[n] = obj_vec[i].node_x[j];	// previous position initialised to current
			y_ref[n] = obj_vec[i].node_y_ref[j];
			y[n] = obj_vec[i].node_y[j];
			y0[n] = obj_vec[i].node_y[j];
			z_ref[n] = obj_vec[i].node_z_ref[j];
			z[n] = obj_vec[i].node_z[j];
			z0[n] = obj_vec[i].node_z[j];
			n++;
		}

		// transfer tet connectivity. Bug fix: use a running offset (t) so a
		// second object no longer overwrites the connectivity of the first
		// (the node loop above already used a running offset, this one did not).
		// NOTE(review): node indices inside tet_connectivity are copied verbatim;
		// confirm they are already globally offset for multi-object cases.
		for (int k = 0; k < obj_vec[i].num_tets * 3; k++) {
			tet_connectivity[t] = obj_vec[i].tet_connectivity[k];
			t++;
		}
	}
}
template <typename T>
void gpu_solver::mesh_to_array(T* target, unstructured_mesh &mesh, int total_nodes, std::string name) {
	// Flatten one scalar/index mesh field, selected by name, into a raw array
	// for GPU transfer. "gradient_stencil" writes 6 entries per cell; all
	// other supported names write one entry per node/face.
	for (int i = 0; i < total_nodes; i++) {
		if (name == "volume") {
			target[i] = mesh.get_cell_volume(i);
		}
		else if (name == "gradient_stencil") {
			// six stencil-cell ids per cell
			for (int j = 0; j < 6; j++) {
				target[i * 6 + j] = mesh.gradient_cells[i][j];
			}
		}
		else if (name == "mesh_owner") {
			target[i] = mesh.get_mesh_owner(i);
		}
		else if (name == "mesh_neighbour") {
			target[i] = mesh.get_mesh_neighbour(i);
		}
		else if (name == "surface_area") {
			target[i] = mesh.get_face_area(i);
		}
		else if (name == "streaming_dt") {
			target[i] = mesh.get_delta_t_face(i);
		}
	}
}
template <typename T>
void gpu_solver::mesh_to_array_double(T* target, unstructured_mesh &mesh, int total_nodes, std::string name)
{
	// Pack one vector-valued mesh field, selected by name (cell centroids,
	// face normals or face centroids), into a double3 array for GPU transfer.
	for (int i = 0; i < total_nodes; i++) {
		double3 packed;
		if (name == "cell_centroid") {
			packed.x = mesh.get_centroid_x(i);
			packed.y = mesh.get_centroid_y(i);
			packed.z = mesh.get_centroid_z(i);
			target[i] = packed;
		}
		else if (name == "face_normal") {
			packed.x = mesh.get_face_i(i);
			packed.y = mesh.get_face_j(i);
			packed.z = mesh.get_face_k(i);
			target[i] = packed;
		}
		else if (name == "face_centroid") {
			packed.x = mesh.get_face_x(i);
			packed.y = mesh.get_face_y(i);
			packed.z = mesh.get_face_z(i);
			target[i] = packed;
		}
	}
}
template <typename T>
void gpu_solver::gradients_to_array_double(T* target, gradients &grads, int total_nodes, std::string name)
{
	// Pack the per-cell least-squares RHS weight vectors (six stencil entries
	// per cell) into double3 form for GPU transfer. Only "RHS_array" is handled.
	for (int i = 0; i < total_nodes; i++) {
		if (name == "RHS_array") {
			for (int j = 0; j < 6; j++) {
				double3 packed;
				packed.x = double(grads.RHS_x[i * 6 + j]);
				packed.y = double(grads.RHS_y[i * 6 + j]);
				packed.z = double(grads.RHS_z[i * 6 + j]);
				target[i * 6 + j] = packed;
			}
		}
	}
}
template <typename T>
void gpu_solver::gradients_to_array(T* target, gradients &grads, int total_nodes, std::string name)
{
	// Copy one of the nine least-squares LHS coefficient arrays, selected by
	// name ("LHS_xx" ... "LHS_zz"), into a raw array for GPU transfer.
	// An unrecognised name leaves target untouched.
	for (int i = 0; i < total_nodes; i++) {
		if (name == "LHS_xx")      target[i] = grads.LHS_xx[i];
		else if (name == "LHS_xy") target[i] = grads.LHS_xy[i];
		else if (name == "LHS_xz") target[i] = grads.LHS_xz[i];
		else if (name == "LHS_yx") target[i] = grads.LHS_yx[i];
		else if (name == "LHS_yy") target[i] = grads.LHS_yy[i];
		else if (name == "LHS_yz") target[i] = grads.LHS_yz[i];
		else if (name == "LHS_zx") target[i] = grads.LHS_zx[i];
		else if (name == "LHS_zy") target[i] = grads.LHS_zy[i];
		else if (name == "LHS_zz") target[i] = grads.LHS_zz[i];
	}
}
void gpu_solver::soln_to_double(double4* target, Solution &soln_a, int total_nodes) {
	// Pack the macroscopic solution into double4 form for the GPU:
	// x, y, z hold the velocity components and w holds the density.
	for (int i = 0; i < total_nodes; i++) {
		double4 packed;
		packed.x = soln_a.get_u(i);
		packed.y = soln_a.get_v(i);
		packed.z = soln_a.get_w(i);
		packed.w = soln_a.get_rho(i);
		target[i] = packed;
	}
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
// Computes the per-cell pseudo time step from the inviscid and viscous
// spectral radii and fills delta_t_local (and, for "talts", delta_t_frequency).
// Supported globals.time_stepping modes: "local", "min", "talts"; any other
// value gives a constant user-defined step. delta_t itself is not modified here.
void gpu_solver::get_cfl(double &delta_t, Solution &soln
	, unstructured_mesh &Mesh, global_variables &globals, double* delta_t_local, int* delta_t_frequency, Solution &cfl_areas) {

	double factor;
	double area_x_eigen, visc_eigen;
	factor = globals.time_marching_step;

	double visc_constant;
	visc_constant = 4;

	double min_delta_t, temp;

	double effective_speed_of_sound;
	//effective_speed_of_sound = 1/sqrt(3);
	// preconditioned effective speed of sound (Guo 2004 style preconditioning)
	effective_speed_of_sound = globals.max_velocity* sqrt(1 - globals.pre_conditioned_gamma + pow(globals.pre_conditioned_gamma / sqrt(3) / globals.max_velocity, 2));

	//loop through cells
	min_delta_t = 100000000000;

	for (int i = 0; i < Mesh.get_n_cells(); i++) {
		delta_t_frequency[i] = 1;
		// eigen values as per Zhaoli guo(2004) - preconditioning
		//estimation of spectral radii s per Jiri Blasek: CFD Principles and Application Determination of Max time Step

		// inviscid spectral radius projected onto the cell's CFL areas
		area_x_eigen = 0;
		area_x_eigen = (fabs(soln.get_u(i)) + effective_speed_of_sound)*cfl_areas.get_u(i)
			+ (fabs(soln.get_v(i)) + effective_speed_of_sound)*cfl_areas.get_v(i)
			+ (fabs(soln.get_w(i)) + effective_speed_of_sound)*cfl_areas.get_w(i);

		area_x_eigen = area_x_eigen / globals.pre_conditioned_gamma;

		//reducing preconditioning increases viscous flux - increases eigenvalue
		visc_eigen = 2 * globals.visc / globals.pre_conditioned_gamma / soln.get_rho(i) / Mesh.get_cell_volume(i);
		visc_eigen = visc_eigen * (cfl_areas.get_u(i)*cfl_areas.get_u(i) + cfl_areas.get_v(i)*cfl_areas.get_v(i) + cfl_areas.get_w(i)* cfl_areas.get_w(i));

		area_x_eigen = area_x_eigen + visc_constant * visc_eigen;

		// use smallest time step allowed
		temp = factor * Mesh.get_cell_volume(i) / area_x_eigen;
		// NOTE(review): a negative temp overwrites min_delta_t unconditionally
		// here, even when min_delta_t is already more negative - confirm whether
		// this branch is an intentional error trap or a leftover
		if (temp < 0) {
			min_delta_t = temp;
		}
		if (temp < min_delta_t) {
			min_delta_t = temp;
		}
		if (globals.time_stepping == "local" || globals.time_stepping == "talts") {
			delta_t_local[i] = temp;
		}
		else { //constant user defined time step
			delta_t_local[i] = factor;
		}
	}

	// "min": every cell advances with the single smallest time step
	if (globals.time_stepping == "min") {
		std::fill_n(delta_t_local, Mesh.get_n_cells(), min_delta_t);
	}

	// "talts": temporally adaptive local time stepping - each cell's step is
	// the global minimum scaled by a power-of-two frequency
	if (globals.time_stepping == "talts") {
		for (int i = 0; i < Mesh.get_n_cells(); i++) {
			delta_t_frequency[i] = pow(2, floor(log2(delta_t_local[i] / min_delta_t)));
			delta_t_local[i] = min_delta_t * delta_t_frequency[i];
		}
	}

	return;
}
| 2a9e00b197b95cd2ab0c40dd51a90de14a1055ec.cu |
#include "Solver_gpu.h"
#include <math.h>
#include <cmath>
#include "Solver.h"
#include "vector_var.h"
#include <iostream>
#include "Solution.h"
#include <fstream>
#include "global_variables.h"
#include "residuals.h"
#include <cstdio>
#include <ctime>
#include "artificial_dissipation.h"
#include <boost/math/special_functions/sign.hpp>
#include <limits>
#include "RungeKutta.h"
#include "tecplot_output.h"
#include "gradients.h"
#include <string>
#include <sstream>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Utilities and system includes
#include <helper_cuda.h> // helper function CUDA error checking and initialization
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <cuda_profiler_api.h>
#include "lagrangian_object.h"
#include "common_kernels.hpp"
#include "LBFS.hpp"
#include "immersed_boundary_method.hpp"
#define IMMERSED_BOUNDARY_METHOD
using namespace std;
gpu_solver::gpu_solver()
{
	//ctor: no members to initialise; all buffers are allocated inside the solver entry point
}
gpu_solver::~gpu_solver()
{
	//dtor: trivial; host/device allocations are managed by the solver routines themselves
}
void gpu_solver::cell_interface_initialiser(double &rho_interface, vector_var &rho_u_interface,
	flux_var &x_flux, flux_var &y_flux) {
	// Reset the interface density/momentum accumulators and both flux
	// accumulators to zero ahead of a cell-interface evaluation.
	rho_interface = 0;
	rho_u_interface.x = rho_u_interface.y = rho_u_interface.z = 0;
	x_flux.P = x_flux.momentum_x = x_flux.momentum_y = x_flux.momentum_z = 0;
	y_flux.P = y_flux.momentum_x = y_flux.momentum_y = y_flux.momentum_z = 0;
}
double gpu_solver::feq_calc_incomp(double weight, vector_var e_alpha, vector_var u_lattice, double u_magnitude,
	double cs, double rho_lattice, double rho_0, int k) {
	// Incompressible-form equilibrium distribution for one lattice direction:
	// the velocity terms are scaled by the constant reference density rho_0,
	// while the local density enters only through the additive weight*rho term.
	// Parameter k (lattice direction index) is not used in the formula itself.
	double e_dot_u = e_alpha.Dot_Product(u_lattice);
	double feq = (e_dot_u * 3.0
		+ (pow(e_dot_u, 2) - pow((u_magnitude* cs), 2)) * 4.5) * weight * rho_0
		+ weight * rho_lattice;
	return feq;
}
double gpu_solver::feq_calc(double weight, vector_var e_alpha, vector_var u_lattice, double u_magnitude,
	double cs, double rho_lattice) {
	// Standard second-order BGK equilibrium distribution for one lattice
	// direction, scaled by the direction weight and the local density.
	double e_dot_u = e_alpha.Dot_Product(u_lattice);
	double feq = (1.0
		+ e_dot_u * 3.0
		+ (pow(e_dot_u, 2) - pow((u_magnitude* cs), 2)) * 4.5) * weight * rho_lattice;
	return feq;
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
void gpu_solver::populate_cfl_areas(Solution &cfl_areas, unstructured_mesh &Mesh) {

	// For each cell, accumulate the projected face areas |n|*A in each
	// coordinate direction over the gradient-stencil faces; half of each sum
	// is stored in cfl_areas and later read back via get_u/get_v/get_w in get_cfl().
	double area_x, area_y, area_z;
	int face;

	for (int i = 0; i < Mesh.get_n_cells(); i++) {
		area_x = 0;
		area_y = 0;
		area_z = 0;

		// time step condition as per OpenFoam calcs
		for (int f = 0; f < Mesh.gradient_faces[i].size(); f++) {
			face = Mesh.gradient_faces[i][f];

			// eigen values as per Zhaoli guo(2004) - preconditioning
			//method as per Jiri Blasek: CFD Principles and Application Determination of Max time Step
			// need to calulate correct direction of face vector
			area_x = area_x + fabs(Mesh.get_face_i(face)*Mesh.get_face_area(face));
			area_y = area_y + fabs(Mesh.get_face_j(face)*Mesh.get_face_area(face));
			area_z = area_z + fabs(Mesh.get_face_k(face)*Mesh.get_face_area(face));
		}

		cfl_areas.add_u(i, area_x / 2);
		cfl_areas.add_v(i, area_y / 2); // bug fix: was add_u, which lumped the y component into u
		cfl_areas.add_w(i, area_z / 2); // bug fix: was add_u; matches the double3 overload and get_cfl's get_v/get_w reads
	}
	return;
}
void gpu_solver::General_Purpose_Solver_mk_i(unstructured_mesh &Mesh, Solution &soln, Boundary_Conditions &bcs,
external_forces &source, global_variables &globals, domain_geometry &domain,
initial_conditions &init_conds, unstructured_bcs &quad_bcs_orig, int mg,
Solution &residual, int fmg, post_processing &pp, std::vector<lagrangian_object> &object_vec)
{
///Declarations
RungeKutta rk4;
Solution residual_worker(Mesh.get_total_cells()); // stores residuals
Solution vortex_error(Mesh.get_total_cells());
Solution real_error(Mesh.get_total_cells());
Solution wall_shear_stress(Mesh.get_n_wall_cells());
gradients grads(Mesh.get_total_cells());
Solution cfl_areas(Mesh.get_total_cells());
/// Declarations and initialisations
flux_var RK;
double4 *temp_soln, *soln_t0, *soln_t1;
double *force_x, *force_y, *force_z;
//mesh related GPU variables
double3 *d_cfl_areas;
double3 *cell_centroid;
double3 *face_normal;
double3 *face_centroid;
double *cell_volume;
double *surface_area;
int* gradient_stencil;
int* mesh_owner;
int* mesh_neighbour;
double *streaming_dt;
//residual related GPU variables
double *res_rho, *res_u, *res_v, *res_w;
///gradient related GPU variables
double3 *RHS_arr;
double3 *grad_rho_arr;
double3 *grad_u_arr;
double3 *grad_v_arr;
double3 *grad_w_arr;
double4 *res_face;
double *LHS_xx;
double *LHS_xy;
double *LHS_xz;
double *LHS_yx;
double *LHS_yy;
double *LHS_yz;
double *LHS_zx;
double *LHS_zy;
double *LHS_zz;
//bcs related GPU variables
double4 *bcs_arr;
int* bcs_rho_type;
int* bcs_vel_type;
double4* cell_flux_arr;
double delta_t = globals.time_marching_step;
double *d_delta_t_local;
double *local_fneq;
double * delta_t_local;
int *delta_t_frequency;
/// assign memory
{
delta_t_local = new double[Mesh.get_n_cells()];
if (delta_t_local == NULL) exit(1);
delta_t_frequency = new int[Mesh.get_n_cells()];
if (delta_t_frequency == NULL) exit(1);
temp_soln = new double4[Mesh.get_total_cells()];
if (temp_soln == NULL) exit(1);
soln_t0 = new double4[Mesh.get_total_cells()];
if (soln_t0 == NULL) exit(1);
soln_t1 = new double4[Mesh.get_total_cells()];
if (soln_t1 == NULL) exit(1);
d_delta_t_local = new double[Mesh.get_n_cells()];
if (d_delta_t_local == NULL) exit(1);
local_fneq = new double[Mesh.get_total_cells()];
if (local_fneq == NULL) exit(1);
force_x = new double[Mesh.get_n_cells()];
if (force_x == NULL) exit(1);
force_y = new double[Mesh.get_n_cells()];
if (force_y == NULL) exit(1);
force_z = new double[Mesh.get_n_cells()];
if (force_z == NULL) exit(1);
res_rho = new double[Mesh.get_n_cells()];
if (res_rho == NULL) exit(1);
res_u = new double[Mesh.get_n_cells()];
if (res_u == NULL) exit(1);
res_v = new double[Mesh.get_n_cells()];
if (res_v == NULL) exit(1);
res_w = new double[Mesh.get_n_cells()];
if (res_w == NULL) exit(1);
res_face = new double4[Mesh.get_n_faces()];
if (res_face == NULL) exit(1);
//Mesh related allocations
cell_volume = new double[Mesh.get_total_cells()];
if (cell_volume == NULL) exit(1);
surface_area = new double[Mesh.get_n_faces()];
if (surface_area == NULL) exit(1);
gradient_stencil = new int[Mesh.get_n_cells() * 6];
if (gradient_stencil == NULL) exit(1);
mesh_owner = new int[Mesh.get_n_faces()];
if (mesh_owner == NULL) exit(1);
mesh_neighbour = new int[Mesh.get_n_faces()];
if (mesh_neighbour == NULL) exit(1);
d_cfl_areas = new double3[Mesh.get_total_cells()];
if (d_cfl_areas == NULL) exit(1);
cell_centroid = new double3[Mesh.get_total_cells()];
if (cell_centroid == NULL) exit(1);
face_centroid = new double3[Mesh.get_n_faces()];
if (face_centroid == NULL) exit(1);
face_normal = new double3[Mesh.get_n_faces()];
if (face_normal == NULL) exit(1);
streaming_dt = new double[Mesh.get_total_cells()];
if (streaming_dt == NULL) exit(1);
cell_flux_arr = new double4[Mesh.get_n_faces()];
if (cell_flux_arr == NULL) exit(1);
//bcs related GPU variables
bcs_arr = new double4[Mesh.get_num_bc()];
if (bcs_arr == NULL) exit(1);
bcs_rho_type = new int[Mesh.get_num_bc()];
if (bcs_rho_type == NULL) exit(1);
bcs_vel_type = new int[Mesh.get_num_bc()];
if (bcs_vel_type == NULL) exit(1);
//Gradient related allocations
RHS_arr = new double3[Mesh.get_n_cells() * 6];
if (RHS_arr == NULL) exit(1);
grad_rho_arr = new double3[Mesh.get_total_cells()];
if (grad_rho_arr == NULL) exit(1);
grad_u_arr = new double3[Mesh.get_total_cells()];
if (grad_u_arr == NULL) exit(1);
grad_v_arr = new double3[Mesh.get_total_cells()];
if (grad_v_arr == NULL) exit(1);
grad_w_arr = new double3[Mesh.get_total_cells()];
if (grad_w_arr == NULL) exit(1);
LHS_xx = new double[Mesh.get_n_cells()];
if (LHS_xx == NULL) exit(1);
LHS_xy = new double[Mesh.get_n_cells()];
if (LHS_xy == NULL) exit(1);
LHS_xz = new double[Mesh.get_n_cells()];
if (LHS_xz == NULL) exit(1);
LHS_yx = new double[Mesh.get_n_cells()];
if (LHS_yx == NULL) exit(1);
LHS_yy = new double[Mesh.get_n_cells()];
if (LHS_yy == NULL) exit(1);
LHS_yz = new double[Mesh.get_n_cells()];
if (LHS_yz == NULL) exit(1);
LHS_zx = new double[Mesh.get_n_cells()];
if (LHS_zx == NULL) exit(1);
LHS_zy = new double[Mesh.get_n_cells()];
if (LHS_zy == NULL) exit(1);
LHS_zz = new double[Mesh.get_n_cells()];
if (LHS_zz == NULL) exit(1);
}
//lagrangian object allocations
#if defined IMMERSED_BOUNDARY_METHOD
// first get total of object nodes for all cells
//loop through vector
int total_object_nodes = 0;
int total_object_springs = 0;
int total_object_tets = 0;
for (int i = 0; i < object_vec.size(); i++) {
total_object_nodes = total_object_nodes + object_vec[i].num_nodes;
total_object_springs = total_object_springs + object_vec[i].num_springs;
total_object_tets = total_object_tets + object_vec[i].num_tets;
}
double * object_x_ref, *object_y_ref, *object_z_ref;
double * object_x, *object_y, *object_z;
double * object_x0, *object_y0, *object_z0;
double * object_vel_x, *object_vel_y, *object_vel_z;
double * object_force_x, *object_force_y, *object_force_z;
int * object_tet_connectivity;
object_tet_connectivity = new int[total_object_tets *3];
if (object_tet_connectivity == NULL) exit(1);
object_x_ref = new double[total_object_nodes];
if (object_x_ref == NULL) exit(1);
object_y_ref = new double[total_object_nodes];
if (object_y_ref == NULL) exit(1);
object_z_ref = new double[total_object_nodes];
if (object_z_ref == NULL) exit(1);
object_x = new double[total_object_nodes];
if (object_x == NULL) exit(1);
object_y = new double[total_object_nodes];
if (object_y == NULL) exit(1);
object_z = new double[total_object_nodes];
if (object_z == NULL) exit(1);
object_x0 = new double[total_object_nodes];
if (object_x0 == NULL) exit(1);
object_y0 = new double[total_object_nodes];
if (object_y0 == NULL) exit(1);
object_z0 = new double[total_object_nodes];
if (object_z0 == NULL) exit(1);
object_vel_x = new double[total_object_nodes];
if (object_vel_x == NULL) exit(1);
object_vel_y = new double[total_object_nodes];
if (object_vel_y == NULL) exit(1);
object_vel_z = new double[total_object_nodes];
if (object_vel_z == NULL) exit(1);
object_force_x = new double[total_object_nodes];
if (object_force_x == NULL) exit(1);
object_force_y = new double[total_object_nodes];
if (object_force_y == NULL) exit(1);
object_force_z = new double[total_object_nodes];
if (object_force_z == NULL) exit(1);
#endif
double local_tolerance;
double3 mesh_lengths, mesh_origin;
mesh_lengths.x = domain.X;
mesh_lengths.y = domain.Y;
mesh_lengths.z = domain.Z;
mesh_origin.x = domain.origin_x;
mesh_origin.y = domain.origin_y;
mesh_origin.z = domain.origin_z;
double* h_lattice_weight;
h_lattice_weight = new double[15];
if (h_lattice_weight == NULL) exit(1);
double time;
double output_residual_threshold = 0;
double visc;
double angular_freq, wom_cos, force;
double td; // taylor vortex decay time
double drag_t1; //drag co-efficients
std::ofstream error_output, vortex_output, max_u, debug_log;
std::string output_dir, decay_dir, max_u_dir;
output_dir = globals.output_file + "/error.txt";
vector_var cell_1, cell_2, interface_node, lattice_node, delta_u, delta_v, delta_w, delta_rho;
vector_var relative_interface;
vector_var vel_lattice, rho_u_interface, u_interface;
vector_var delta_u1, delta_v1, delta_w1, delta_rho1;
vector_var cell_normal;
vector_var flux_e_alpha[9];
vector_var u, v, w, rho;
std::vector<vector_var> e_alpha;
std::vector<int> cell_nodes;
// vector_var flux_e_alpha;
residuals convergence_residual;
flux_var x_flux, y_flux, z_flux;
flux_var cell_flux;
flux_var debug[4], debug_flux[4], arti_debug[4];
flux_var dbug[4];
flux_var int_debug[4];
int timesteps;
int wall = 0;
tecplot_output<double> tecplot;
///Initialisations
dt = domain.dt; // timestepping for streaming // non-dim equals 1
c = 1; // assume lattice spacing is equal to streaming timestep
cs = c / sqrt(3);
visc = (globals.tau - 0.5) / 3 * domain.dt;
local_tolerance = globals.tolerance;
delta_t = globals.time_marching_step;
timesteps = ceil(globals.simulation_length);
output_dir = globals.output_file + "/error.txt";
decay_dir = globals.output_file + "/vortex_error.txt";
max_u_dir = globals.output_file + "/max_u.txt";
// error_output.open("/home/brendan/Dropbox/PhD/Test Cases/Couette Flow/error.txt", ios::out);
error_output.open(output_dir.c_str(), ios::out);
output_dir = globals.output_file + "/residual_log.txt";
debug_log.open(output_dir.c_str(), ios::out);
vortex_output.open(decay_dir.c_str(), ios::out);
max_u.open(max_u_dir.c_str(), ios::out);
time = 0;
angular_freq = visc * pow(globals.womersley_no, 2) / pow(Mesh.get_Y() / 2, 2);
force = -init_conds.pressure_gradient;
time = 0;
td = 100000000000000000;
grads.pre_fill_LHS_and_RHS_matrix(bcs, Mesh, domain, soln, globals);
populate_cfl_areas(cfl_areas, Mesh);
debug_log << "t,rk,i,res_rho,res_u,res_v,res_w,x,y,z, dt,visc,rho,u,v,ux,uy,uz,vx,vy,vz" << endl;
/// CUDA checks***********************************//////////////////////
cudaDeviceProp deviceProp;
int argc;
const char *argv = " ";
/*int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_SUCCESS);
}*/
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
// Statistics about the GPU device
printf(
"> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
checkCudaErrors(cudaSetDevice(0));
// num bloacks for different gpu kernels
int blockSize = 256;
int numBlocks = (Mesh.get_total_cells() + blockSize - 1) / blockSize;
int n_Cell_Blocks = (Mesh.get_n_cells() + blockSize - 1) / blockSize;
int n_bc_Blocks = (Mesh.get_num_bc() + blockSize - 1) / blockSize;
int n_face_Blocks = (Mesh.get_n_faces() + blockSize - 1) / blockSize;
#if defined IMMERSED_BOUNDARY_METHOD
int n_node_Blocks = (total_object_nodes + blockSize - 1) / blockSize;
#endif
double delta_x = domain.dt * 2;
double4 convergence;
convergence.w = 100000000000;
double *res_rho_block;
res_rho_block = new double[n_Cell_Blocks];
double *res_u_block;
res_u_block = new double[n_Cell_Blocks];
double *res_v_block;
res_v_block = new double[n_Cell_Blocks];
double *res_w_block;
res_w_block = new double[n_Cell_Blocks];
//arrrays for CUDA
{
checkCudaErrors(cudaMallocManaged(&res_rho_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_u_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_v_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_w_block, n_Cell_Blocks * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&d_delta_t_local, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&d_cfl_areas, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&temp_soln, Mesh.get_total_cells() * sizeof(double4)));
checkCudaErrors(cudaMallocManaged(&soln_t0, Mesh.get_total_cells() * sizeof(double4)));
checkCudaErrors(cudaMallocManaged(&soln_t1, Mesh.get_total_cells() * sizeof(double4)));
checkCudaErrors(cudaMallocManaged(&cell_volume, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&gradient_stencil, Mesh.get_n_cells() * sizeof(int) * 6));
checkCudaErrors(cudaMallocManaged(&mesh_owner, Mesh.get_n_faces() * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&mesh_neighbour, Mesh.get_n_faces() * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&cell_centroid, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&face_centroid, Mesh.get_n_faces() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&face_normal, Mesh.get_n_faces() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&surface_area, Mesh.get_n_faces() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&streaming_dt, Mesh.get_n_faces() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&cell_flux_arr, Mesh.get_n_faces() * sizeof(double4)));
checkCudaErrors(cudaMallocManaged(&force_x, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&force_y, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&force_z, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_rho, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_u, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_v, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_w, Mesh.get_total_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&res_face, Mesh.get_n_faces() * sizeof(double4)));
checkCudaErrors(cudaMallocManaged(&local_fneq, Mesh.get_total_cells() * sizeof(double)));
//arrays for bcs
checkCudaErrors(cudaMallocManaged(&bcs_arr, Mesh.get_num_bc() * sizeof(double4)));
checkCudaErrors(cudaMallocManaged(&bcs_rho_type, Mesh.get_num_bc() * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&bcs_vel_type, Mesh.get_num_bc() * sizeof(int)));
//arrrays for CUDA Gradient
checkCudaErrors(cudaMallocManaged(&grad_rho_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&grad_u_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&grad_v_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&grad_w_arr, Mesh.get_total_cells() * sizeof(double3)));
checkCudaErrors(cudaMallocManaged(&RHS_arr, Mesh.get_n_cells() * sizeof(double3) * 6));
checkCudaErrors(cudaMallocManaged(&LHS_xx, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_xy, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_xz, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_yx, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_yy, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_yz, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_zx, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_zy, Mesh.get_n_cells() * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&LHS_zz, Mesh.get_n_cells() * sizeof(double)));
#if defined IMMERSED_BOUNDARY_METHOD
//arrays for lagrangian objects
checkCudaErrors(cudaMallocManaged(&object_tet_connectivity, total_object_tets * 3 * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&object_x_ref, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_y_ref, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_z_ref, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_x, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_y, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_z, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_x0, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_y0, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_z0, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_vel_x, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_vel_y, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_vel_z, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_force_x, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_force_y, total_object_nodes * sizeof(double)));
checkCudaErrors(cudaMallocManaged(&object_force_z, total_object_nodes * sizeof(double)));
#endif
}
populate_e_alpha(e_alpha, h_lattice_weight, c, globals.PI, 15);
checkCudaErrors(cudaMemcpyToSymbol(lattice_weight, h_lattice_weight, 15 * sizeof(double)));
/// Sync before CUDA array used
cudaDeviceSynchronize();
populate_cfl_areas(d_cfl_areas, Mesh);
// transfer class members to arrays for CUDA
{
soln_to_double(temp_soln, soln, Mesh.get_total_cells());
mesh_to_array(cell_volume, Mesh, Mesh.get_total_cells(), "volume");
mesh_to_array(gradient_stencil, Mesh, Mesh.get_n_cells(), "gradient_stencil");
mesh_to_array(mesh_owner, Mesh, Mesh.get_n_faces(), "mesh_owner");
mesh_to_array(mesh_neighbour, Mesh, Mesh.get_n_faces(), "mesh_neighbour");
mesh_to_array(surface_area, Mesh, Mesh.get_n_faces(), "surface_area");
mesh_to_array(streaming_dt, Mesh, Mesh.get_n_faces(), "streaming_dt");
mesh_to_array_double(face_normal, Mesh, Mesh.get_n_faces(), "face_normal");
mesh_to_array_double(cell_centroid, Mesh, Mesh.get_total_cells(), "cell_centroid");
mesh_to_array_double(face_centroid, Mesh, Mesh.get_n_faces(), "face_centroid");
gradients_to_array(LHS_xx, grads, Mesh.get_n_cells(), "LHS_xx");
gradients_to_array(LHS_xy, grads, Mesh.get_n_cells(), "LHS_xy");
gradients_to_array(LHS_xz, grads, Mesh.get_n_cells(), "LHS_xz");
gradients_to_array(LHS_yx, grads, Mesh.get_n_cells(), "LHS_yx");
gradients_to_array(LHS_yy, grads, Mesh.get_n_cells(), "LHS_yy");
gradients_to_array(LHS_yz, grads, Mesh.get_n_cells(), "LHS_yz");
gradients_to_array(LHS_zx, grads, Mesh.get_n_cells(), "LHS_zx");
gradients_to_array(LHS_zy, grads, Mesh.get_n_cells(), "LHS_zy");
gradients_to_array(LHS_zz, grads, Mesh.get_n_cells(), "LHS_zz");
gradients_to_array_double(RHS_arr, grads, Mesh.get_n_cells(), "RHS_array");
bcs_to_array_double(bcs_arr, bcs, Mesh.get_num_bc(), "bcs");
bcs_to_array(bcs_rho_type, bcs, Mesh.get_num_bc(), "rho_type");
bcs_to_array(bcs_vel_type, bcs, Mesh.get_num_bc(), "vel_type");
}
#if defined IMMERSED_BOUNDARY_METHOD
lagrangian_object_to_array(object_vec, object_x_ref, object_y_ref, object_z_ref, object_x, object_y, object_z, object_x0, object_y0, object_z0, object_tet_connectivity);
#endif
cudaProfilerStart();
clone_a_to_b << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), temp_soln, soln_t1); // soln_t0 holds macro variable solution at start of time step
// loop in time
for (int t = 0; t < timesteps; t++) {
// soln_t0 is the solution at the start of every
// RK step.(rk = n) Temp_soln holds the values at end of
// step.(rk = n+1)
clone_a_to_b << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), soln_t1, soln_t0);// soln_t0 holds macro variable solution at start of time step
post_kernel_checks();
//womersley flow peculiarities
if (globals.testcase == 4) {
wom_cos = cos(angular_freq * t * delta_t);
force = -init_conds.pressure_gradient * wom_cos;
}
//local timestepping calculation
// can be removed for uniform grids and replaced with a single calc
get_cfl_device <<< n_Cell_Blocks, blockSize >>> (Mesh.get_n_cells(), temp_soln, cell_volume, d_delta_t_local, d_cfl_areas, globals.time_marching_step,
globals.max_velocity,globals.pre_conditioned_gamma, globals.visc, globals.gpu_time_stepping);
post_kernel_checks();
#if defined IMMERSED_BOUNDARY_METHOD
// need to propogate node position based on final RK4 velocity
interpolate_velocities_on_nodes << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, temp_soln, Mesh.get_n_cells());
/* interpolate_velocities_on_nodes_cos_kernel << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, temp_soln, globals.PI);*/
update_node_positions_rk4 << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, delta_t, object_vec[0].num_nodes,
object_x0, object_y0, object_z0);
/*fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_x);*/
fill_double << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_x, init_conds.pressure_gradient);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_y);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), force_z);
post_kernel_checks();
//for now assume uniform stiffness, radius etc.
update_node_forces << < n_node_Blocks, blockSize >> > (total_object_nodes, object_force_x, object_force_y, object_force_z, object_x, object_y, object_z, object_x_ref, object_y_ref, object_z_ref,
object_vec[0].stiffness, object_vec[0].radius, globals.PI, object_vec[0].num_nodes, object_vel_x, object_vel_y, object_vel_z, delta_t, object_vec[0].depth);
//assume uniform grid for now, need moving least squares stencil in the future
spread_forces_on_structured_grid << < n_node_Blocks, blockSize >> > (total_object_nodes, object_force_x, object_force_y, object_force_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, force_x, force_y, force_z, Mesh.get_n_cells());
/*spread_forces_on_structured_grid_cos_kernel << < n_node_Blocks, blockSize >> > (total_object_nodes, object_force_x, object_force_y, object_force_z, object_x, object_y, object_z,
mesh_origin, mesh_lengths, delta_x, force_x, force_y, force_z, Mesh.get_n_cells(), globals.PI);*/
#endif
for (int rk = 0; rk < rk4.timesteps; rk++) {
drag_t1 = 0.0;
//update temp_soln boundary conditions
update_unstructured_bcs << < n_bc_Blocks, blockSize >> > (Mesh.get_num_bc(), Mesh.get_n_neighbours(), Mesh.get_n_cells(), mesh_owner, bcs_rho_type, bcs_vel_type, temp_soln, bcs_arr, cell_centroid,domain.Y);
//set to zeros
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_rho);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_u);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_v);
post_kernel_checks();
fill_zero << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_w);
post_kernel_checks();
// time2 = clock();
get_interior_gradients <<< n_Cell_Blocks, blockSize >>> ( Mesh.get_n_cells(), gradient_stencil, temp_soln,
RHS_arr,LHS_xx, LHS_xy, LHS_xz,LHS_yx, LHS_yy, LHS_yz,LHS_zx, LHS_zy, LHS_zz,
grad_rho_arr, grad_u_arr, grad_v_arr, grad_w_arr);
post_kernel_checks();
//get boundary condition gradients
get_bc_gradients << < n_bc_Blocks, blockSize >> > (Mesh.get_num_bc(), Mesh.get_n_neighbours(), Mesh.get_n_cells(), mesh_owner, bcs_rho_type, bcs_vel_type, temp_soln,
face_normal, cell_centroid, bcs_arr,
grad_rho_arr, grad_u_arr, grad_v_arr, grad_w_arr);
post_kernel_checks();
//time3 = clock();
//std::cout << "CPU Cycles Gradients:" << double(time3 - time2) << std::endl;
wall = 0;
// loop through each cell and exclude the ghost cells
//using n_cells here rather than total_cells
cudaDeviceSynchronize();
calc_face_flux << < n_face_Blocks, blockSize >> > (Mesh.get_n_faces(), temp_soln, cell_volume, surface_area, mesh_owner, mesh_neighbour, cell_centroid, face_centroid, face_normal,
streaming_dt, grad_rho_arr, grad_u_arr, grad_v_arr, grad_w_arr, Mesh.get_n_cells(), (1/ globals.pre_conditioned_gamma), local_fneq, globals.visc,
res_rho, res_u,res_v,res_w,res_face,
bcs_rho_type, bcs_vel_type, bcs_arr,globals.PI);
post_kernel_checks();
cudaDeviceSynchronize();
add << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_u, force_x);
add << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_v, force_y);
add << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_w, force_z);
post_kernel_checks();
cudaDeviceSynchronize();
/*
for (int i = 0; i < Mesh.get_n_cells(); i++) {
debug_log << t << ", " << rk << ", " << i << ", " << res_rho[i] << ", " <<
res_u[i] << ", " << res_v[i] << ", " << res_w[i]
<< ", " <<
Mesh.get_centroid_x(i) << " , " << Mesh.get_centroid_y(i) << "," << Mesh.get_centroid_z(i) << "," <<
delta_t_local[i] << " , " << local_fneq[i] << "," <<
soln.get_rho(i) << "," << soln.get_u(i) << " , " << soln.get_v(i) << " , " <<
grad_u_arr[i].x << " , " << grad_u_arr[i].y << " , " << grad_u_arr[i].z << " , " <<
grad_v_arr[i].x << " , " << grad_v_arr[i].y << " , " << grad_v_arr[i].z << " , " <<
grad_w_arr[i].x << " , " << grad_w_arr[i].y << "," << grad_w_arr[i].z
<< endl;
}*/
//Update solutions //update RK values
time_integration << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), rk, rk4.timesteps, d_delta_t_local, soln_t0, soln_t1, temp_soln,
res_rho, res_u, res_v, res_w);
post_kernel_checks();
#if defined IMMERSED_BOUNDARY_METHOD
//// //for now assume uniform stiffness, radius etc.
//interpolate_velocities_on_nodes << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
// mesh_origin, mesh_lengths, delta_x, temp_soln);
///*interpolate_velocities_on_nodes_cos_kernel << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z,
// mesh_origin, mesh_lengths, delta_x, temp_soln,globals.PI);*/
//update_node_positions << < n_node_Blocks, blockSize >> > (total_object_nodes, object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, delta_t, object_vec[0].num_nodes,rk);
#endif
}
//get square of residuals
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_rho);
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_u);
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_v);
square << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), res_w);
//reduce add residuals
total<256> << < n_Cell_Blocks, blockSize >> > (res_rho, res_rho_block, Mesh.get_n_cells());
total<256> << < n_Cell_Blocks, blockSize >> > (res_u, res_u_block, Mesh.get_n_cells());
total<256> << < n_Cell_Blocks, blockSize >> > (res_v, res_v_block, Mesh.get_n_cells());
total<256> << < n_Cell_Blocks, blockSize >> > (res_w, res_w_block, Mesh.get_n_cells());
post_kernel_checks();
cudaDeviceSynchronize();
convergence_residual.reset();
convergence_residual.l2_norm_rms_moukallad(globals, res_rho_block, res_u_block, res_v_block, res_w_block, n_Cell_Blocks, Mesh.get_n_cells());
/*
calc_total_residual << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), convergence, res_rho, res_u, res_v, res_w);
post_kernel_checks();*/
/*check_error << < n_Cell_Blocks, blockSize >> > (Mesh.get_n_cells(), temp_soln);
post_kernel_checks();*/
//convergence_residual.ansys_5_iter_rms(t);
time = t * delta_t;
if (mg == 0 && t%globals.output_step == 1) {
soln.clone(temp_soln);
error_output << t << ", " << convergence_residual.max_error() << ", " <<
convergence_residual.rho_rms << ", " << convergence_residual.u_rms << ", " <<
convergence_residual.v_rms << ", " <<
convergence_residual.w_rms << " , FMG cycle: " << fmg << endl;
cout << "time t=" << time << " error e =" << convergence_residual.max_error()
<< " delta_t:" << delta_t << std::endl;
//max_u << t << "," << soln.get_u(center_node) << "," << force << endl;
cout << "drag: " << drag_t1 << endl;
//only output at decreasing order of magnitudes - save space on hard drive
if (convergence_residual.max_error() < pow(10, output_residual_threshold)) {
tecplot.tecplot_output_unstructured_soln(globals, Mesh, soln, bcs, t, pp, residual_worker, delta_t_local, local_fneq);
tecplot.tecplot_output_lagrangian_object_gpu(object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, object_force_x, object_force_y, object_force_z, globals, domain, t
,object_vec[0].name, object_vec[0].num_nodes, object_vec[0].depth_nodes, object_vec[0].radial_nodes);
output_residual_threshold = output_residual_threshold - 1;
soln.output(globals.output_file, globals, domain);
cudaProfilerStop();
}
//soln.output_centrelines(globals.output_file,globals,Mesh,time);
}
if (convergence_residual.max_error() < local_tolerance || time > td) {
if (mg == 0) {
soln.clone(temp_soln);
cout << "convergence" << endl;
cout << "time t=" << time << " error e =" << convergence_residual.max_error()
<< " delta_t:" << delta_t << std::endl;
error_output.close();
debug_log.close();
vortex_output.close();
max_u.close();
// vortex calcs
soln.update_unstructured_bcs(bcs, Mesh, domain, t);
grads.Get_LS_Gradients(bcs, Mesh, domain, soln, globals);
pp.cylinder_post_processing(Mesh, globals, grads, bcs, soln, domain, wall_shear_stress);
// pp.calc_vorticity(x_gradients,y_gradients);
//pp.calc_streamfunction(Mesh,globals,bcs);
tecplot.tecplot_output_unstructured_soln(globals, Mesh, soln, bcs, t, pp, residual_worker, delta_t_local, local_fneq);
tecplot.tecplot_output_lagrangian_object_gpu(object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, object_force_x, object_force_y, object_force_z, globals, domain, timesteps
, object_vec[0].name, object_vec[0].num_nodes, object_vec[0].depth_nodes, object_vec[0].radial_nodes);
//soln.output_centrelines(globals.output_file,globals,Mesh,time);
}
cudaProfilerStop();
return;
}
}
// pp.calc_vorticity(x_gradients,y_gradients);
//pp.calc_streamfunction(Mesh,globals,bcs);
cudaProfilerStop();
soln.clone(temp_soln);
cout << "out of time" << endl;
error_output.close();
vortex_output.close();
debug_log.close();
max_u.close();
pp.cylinder_post_processing(Mesh, globals, grads, bcs, soln, domain, wall_shear_stress);
tecplot.tecplot_output_unstructured_soln(globals, Mesh, soln, bcs, timesteps, pp, residual_worker, delta_t_local, local_fneq);
tecplot.tecplot_output_lagrangian_object_gpu(object_vel_x, object_vel_y, object_vel_z, object_x, object_y, object_z, object_force_x, object_force_y, object_force_z, globals, domain, timesteps
, object_vec[0].name, object_vec[0].num_nodes, object_vec[0].depth_nodes, object_vec[0].radial_nodes);
}
void gpu_solver::get_weighted_average(gradients &grads, int i, int neighbour, double m1, double m2,
	vector_var &u, vector_var &v, vector_var &w, vector_var &rho, unstructured_mesh &mesh)
{
	// Blend the cell-centred gradients of cell i and its neighbour into one
	// face gradient for u, v, w and rho, weighting cell i by m1 and the
	// neighbour by m2.
	//
	// Cells indexed beyond the interior range are boundary/halo cells whose
	// stored gradients are already evaluated at the cell face, so those are
	// passed through unmodified instead of being averaged.
	if (neighbour > mesh.get_n_cells()) {
		vector_var gu = grads.get_u(neighbour);
		vector_var gv = grads.get_v(neighbour);
		vector_var gw = grads.get_w(neighbour);
		vector_var grho = grads.get_rho(neighbour);
		u.set_equal(gu.x, gu.y, gu.z);
		v.set_equal(gv.x, gv.y, gv.z);
		w.set_equal(gw.x, gw.y, gw.z);
		rho.set_equal(grho.x, grho.y, grho.z);
		return;
	}

	const double total = m1 + m2;
	const double wi = m1 / total;   // weight applied to cell i
	const double wn = m2 / total;   // weight applied to the neighbour

	u.set_equal(grads.get_u(i).x * wi + grads.get_u(neighbour).x * wn,
		grads.get_u(i).y * wi + grads.get_u(neighbour).y * wn,
		grads.get_u(i).z * wi + grads.get_u(neighbour).z * wn);

	v.set_equal(grads.get_v(i).x * wi + grads.get_v(neighbour).x * wn,
		grads.get_v(i).y * wi + grads.get_v(neighbour).y * wn,
		grads.get_v(i).z * wi + grads.get_v(neighbour).z * wn);

	w.set_equal(grads.get_w(i).x * wi + grads.get_w(neighbour).x * wn,
		grads.get_w(i).y * wi + grads.get_w(neighbour).y * wn,
		grads.get_w(i).z * wi + grads.get_w(neighbour).z * wn);

	rho.set_equal(grads.get_rho(i).x * wi + grads.get_rho(neighbour).x * wn,
		grads.get_rho(i).y * wi + grads.get_rho(neighbour).y * wn,
		grads.get_rho(i).z * wi + grads.get_rho(neighbour).z * wn);
}
vector_var gpu_solver::get_e_alpha(int k, double &lattice_weight, double c, double PI) {
	// Lattice velocity e_k and quadrature weight for direction k of a 2D
	// nine-direction (D2Q9) stencil scaled by lattice speed c; z is always 0.
	// Weights: rest particle 4/9, axis links 1/9, diagonal links 1/36.
	vector_var e;
	int ex, ey, ez;
	if (k <= 0) {
		// rest particle
		ex = 0;
		ey = 0;
		ez = 0;
		lattice_weight = 4.0 / 9.0;
	}
	else if (k < 5) {
		// axis-aligned links: angles 0, 90, 180, 270 degrees
		ex = round(cos((k - 1)*PI / 2) * c);
		ey = round(sin((k - 1)*PI / 2)* c);
		ez = 0; //update in 3D
		lattice_weight = 1.0 / 9.0;
	}
	else {
		// diagonal links: length sqrt(2)*c at 45, 135, 225, 315 degrees
		ex = round(sqrt(2) * cos((k - 5)*PI / 2 + PI / 4) * c);
		ey = round(sqrt(2) * sin((k - 5)*PI / 2 + PI / 4) * c);
		ez = 0; //update in 3D
		lattice_weight = 1.0 / 36.0;
	}
	e.x = ex;
	e.y = ey;
	e.z = ez;
	return e;
}
void gpu_solver::populate_e_alpha(vector<vector_var> &e_alpha, double *lattice_weight, double c, double PI, int j) {
	// Append the first j lattice velocities of the D3Q15 set to e_alpha and
	// fill the matching quadrature weights: rest particle 2/9, the six axis
	// links 1/9, the eight corner links 1/72.  c and PI are unused here (the
	// directions are tabulated rather than computed).
	static const int ex[15] = { 0,1,-1,0,0,0,0,1,-1, 1,-1,1,-1,-1,1 };
	static const int ey[15] = { 0,0,0,1,-1,0,0,1,-1,1,-1,-1,1,1,-1 };
	static const int ez[15] = { 0,0,0,0,0,1,-1,1,-1,-1,1,1,-1,1,-1 };

	for (int k = 0; k < j; k++) {
		if (k == 0) {
			lattice_weight[k] = 2.0 / 9.0;      // rest particle
		}
		else if (k <= 6) {
			lattice_weight[k] = 1.0 / 9.0;      // axis-aligned links
		}
		else {
			lattice_weight[k] = 1.0 / 72.0;     // corner links
		}

		vector_var dir;
		dir.x = ex[k];
		dir.y = ey[k];
		dir.z = ez[k];
		e_alpha.push_back(dir);
	}
}
void gpu_solver::get_cell_gradients(Mesh &Mesh, int i, int neighbour, int j, Solution &temp_soln,
	vector_var &delta_rho, vector_var &delta_rho1,
	vector_var &delta_u, vector_var &delta_u1,
	vector_var &delta_v, vector_var &delta_v1,
	Boundary_Conditions &bcs) {

	// Central-difference gradients of rho, u and v for cell i (delta_*) and
	// for its neighbour (delta_*1) on a structured mesh.  For an east-west
	// face (j == 2) the difference is taken between the west and east
	// neighbours of each cell; otherwise between the south and north ones.
	// bcs is unused here.
	auto central_difference = [&](int cell, vector_var &d_rho, vector_var &d_u, vector_var &d_v) {
		int nb1, nb2;
		if (j == 2) {
			nb1 = Mesh.get_w_node(cell);
			nb2 = Mesh.get_e_node(cell);
		}
		else {
			nb1 = Mesh.get_s_node(cell);
			nb2 = Mesh.get_n_node(cell);
		}

		vector_var c1, c2;
		Mesh.get_centroid(nb1, c1);
		Mesh.get_centroid(nb2, c2);

		d_rho.Get_Gradient(temp_soln.get_rho(nb1), temp_soln.get_rho(nb2), c1, c2);
		d_u.Get_Gradient(temp_soln.get_u(nb1), temp_soln.get_u(nb2), c1, c2);
		d_v.Get_Gradient(temp_soln.get_v(nb1), temp_soln.get_v(nb2), c1, c2);
	};

	central_difference(i, delta_rho, delta_u, delta_v);
	central_difference(neighbour, delta_rho1, delta_u1, delta_v1);
}
// Look up the geometry of face j of structured cell i and return it through
// the output references: face midpoint (interface_node), index of the cell
// across the face (neighbour), face area (interface_area) and the face
// normal components (cell_normal).
// Face numbering: 0 = west, 1 = south, 2 = east, 3 = north, 4 = front,
// 5 = back.  boundary_conditions, bc and cell_2 are accepted but not used in
// this overload (the cell_2 centroid lookup is commented out at the bottom).
void gpu_solver::cell_interface_variables(int j, int i, vector_var &interface_node, int &neighbour, double &interface_area,
	vector_var &cell_normal, Boundary_Conditions &boundary_conditions, bc_var &bc,
	Mesh &Mesh, vector_var &cell_2) {

	switch (j) {

	case 0: // West
		interface_node.x = Mesh.get_west_x(i);
		interface_node.y = Mesh.get_west_y(i);
		interface_node.z = Mesh.get_west_z(i);
		neighbour = Mesh.get_w_node(i);
		interface_area = Mesh.get_w_area(i);
		cell_normal.x = Mesh.get_w_i(i);
		cell_normal.y = Mesh.get_w_j(i);
		cell_normal.z = Mesh.get_w_k(i);
		break;

	case 1: // South
		interface_node.x = Mesh.get_south_x(i);
		interface_node.y = Mesh.get_south_y(i);
		interface_node.z = Mesh.get_south_z(i);
		neighbour = Mesh.get_s_node(i);
		interface_area = Mesh.get_s_area(i);
		cell_normal.x = Mesh.get_s_i(i);
		cell_normal.y = Mesh.get_s_j(i);
		cell_normal.z = Mesh.get_s_k(i);
		break;
	case 2: // East
		interface_node.x = Mesh.get_east_x(i);
		interface_node.y = Mesh.get_east_y(i);
		interface_node.z = Mesh.get_east_z(i);
		// note: area assigned before neighbour in this case (harmless;
		// all assignments are independent)
		interface_area = Mesh.get_e_area(i);
		neighbour = Mesh.get_e_node(i);
		cell_normal.x = Mesh.get_e_i(i);
		cell_normal.y = Mesh.get_e_j(i);
		cell_normal.z = Mesh.get_e_k(i);
		break;
	case 3: // North
		interface_node.x = Mesh.get_north_x(i);
		interface_node.y = Mesh.get_north_y(i);
		interface_node.z = Mesh.get_north_z(i);
		neighbour = Mesh.get_n_node(i);
		interface_area = Mesh.get_n_area(i);
		cell_normal.x = Mesh.get_n_i(i);
		cell_normal.y = Mesh.get_n_j(i);
		cell_normal.z = Mesh.get_n_k(i);
		break;
	case 4: // Front
		interface_node.x = Mesh.get_front_x(i);
		interface_node.y = Mesh.get_front_y(i);
		interface_node.z = Mesh.get_front_z(i);
		neighbour = Mesh.get_f_node(i);
		interface_area = Mesh.get_f_area(i);
		cell_normal.x = Mesh.get_f_i(i);
		cell_normal.y = Mesh.get_f_j(i);
		cell_normal.z = Mesh.get_f_k(i);
		break;
	case 5: // Back
		interface_node.x = Mesh.get_back_x(i);
		interface_node.y = Mesh.get_back_y(i);
		interface_node.z = Mesh.get_back_z(i);
		neighbour = Mesh.get_b_node(i);
		interface_area = Mesh.get_b_area(i);
		cell_normal.x = Mesh.get_b_i(i);
		cell_normal.y = Mesh.get_b_j(i);
		cell_normal.z = Mesh.get_b_k(i);
		break;

	}
	//  cell_2.x = Mesh.get_centroid_x(neighbour);
	//    cell_2.y = Mesh.get_centroid_y((neighbour));
	//    cell_2.z = Mesh.get_centroid_z(neighbour);

}
void gpu_solver::cell_interface_variables(int face, int i, vector_var &interface_node, int &neighbour, double &interface_area,
	vector_var &cell_normal, Boundary_Conditions &boundary_conditions, bc_var &bc,
	unstructured_mesh &Mesh, vector_var &cell_2, vector_var &cell_1) {

	// Unstructured-mesh overload: gather the geometry of one face — midpoint
	// (interface_node), neighbour cell index, area, face normal — plus the
	// neighbour's centroid (cell_2).  boundary_conditions, bc, i and cell_1
	// are unused in this overload.
	neighbour = Mesh.get_mesh_neighbour(face);
	interface_area = Mesh.get_face_area(face);

	interface_node.x = Mesh.get_face_x(face);
	interface_node.y = Mesh.get_face_y(face);
	interface_node.z = Mesh.get_face_z(face);

	cell_normal.x = Mesh.get_face_i(face);
	cell_normal.y = Mesh.get_face_j(face);
	cell_normal.z = Mesh.get_face_k(face);

	cell_2.x = Mesh.get_centroid_x(neighbour);
	cell_2.y = Mesh.get_centroid_y(neighbour);
	cell_2.z = Mesh.get_centroid_z(neighbour);
}
void gpu_solver::get_cell_nodes(std::vector<int> &cell_nodes, Boundary_Conditions &bcs, int neighbour,
	Mesh &Mesh, int i, int j) {

	// Build the interpolation stencil around the face between cell i and
	// neighbour.  Boundary-adjacent faces use just the two cells; interior
	// faces add the two transverse neighbours of each cell (north/south for
	// an east-west face, j == 2; east/west otherwise).
	cell_nodes.clear();

	if (bcs.get_bc(i) || bcs.get_bc(neighbour)) {
		cell_nodes.push_back(i);
		cell_nodes.push_back(neighbour);
		return;
	}

	if (j == 2) {
		// east-west face: stencil extends north/south
		int stencil[6] = { i, Mesh.get_n_node(i), Mesh.get_s_node(i),
			neighbour, Mesh.get_n_node(neighbour), Mesh.get_s_node(neighbour) };
		cell_nodes.assign(stencil, stencil + 6);
	}
	else {
		// north-south face: stencil extends east/west
		int stencil[6] = { i, Mesh.get_e_node(i), Mesh.get_w_node(i),
			neighbour, Mesh.get_e_node(neighbour), Mesh.get_w_node(neighbour) };
		cell_nodes.assign(stencil, stencil + 6);
	}
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
void gpu_solver::populate_cfl_areas(double3 *cfl_areas, unstructured_mesh &Mesh) {

	// Precompute, for every interior cell, the sum of its faces' projected
	// areas in x, y and z (halved), used by the CFL time-step estimate
	// (spectral-radius method per Blazek; preconditioning per Guo 2004).
	for (int i = 0; i < Mesh.get_n_cells(); i++) {
		double3 area;
		area.x = 0.0;
		area.y = 0.0;
		area.z = 0.0;

		// accumulate |n_k| * A over the cell's face list; absolute values
		// because face normals may point either way relative to the cell
		for (int f = 0; f < Mesh.gradient_faces[i].size(); f++) {
			const int face = Mesh.gradient_faces[i][f];
			const double face_area = Mesh.get_face_area(face);
			area.x = area.x + fabs(Mesh.get_face_i(face) * face_area);
			area.y = area.y + fabs(Mesh.get_face_j(face) * face_area);
			area.z = area.z + fabs(Mesh.get_face_k(face) * face_area);
		}

		area.x = area.x / 2;
		area.y = area.y / 2;
		area.z = area.z / 2;
		cfl_areas[i] = area;
	}
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
void gpu_solver::inverse_weighted_distance_interpolation(double &u, double &v, double &rho, Boundary_Conditions &bcs,
	Mesh &Mesh, domain_geometry &domain, Solution &soln, vector_var &interface_node,
	int k, int i, int neighbour, vector<vector_var> &e_alpha, int j, std::vector<int> &cell_nodes) {

	// Interpolate u, v and rho at the upstream departure point of lattice
	// direction k (interface_node - e_alpha[k] * dt) by inverse-distance-
	// squared weighting over the centroids listed in cell_nodes.
	// Outputs are returned through u, v and rho.  bcs, domain, i and
	// neighbour are unused here.
	//
	// Bug fix: in the "target coincides with a centroid" early return, the
	// original assigned the local weight variable (then named 'w') instead of
	// the output reference v, leaving v unset on that path.  The weight
	// locals are renamed so the shadowing cannot recur.
	double sum_u = 0.0;    // weighted accumulators
	double sum_v = 0.0;
	double sum_rho = 0.0;
	double sum_weights = 0.0;

	// streaming time step depends on face orientation (j == 2: east-west)
	double dt;
	if (j == 2) {
		dt = Mesh.get_delta_t_e(i);
	}
	else {
		dt = Mesh.get_delta_t_n(i);
	}

	// departure point: trace back along lattice direction k
	vector_var node_displacement, target_node;
	target_node.x = interface_node.x - e_alpha[k].x * dt;
	target_node.y = interface_node.y - e_alpha[k].y * dt;
	target_node.z = interface_node.z - e_alpha[k].z * dt;

	for (auto &it : cell_nodes) {
		node_displacement.x = Mesh.get_centroid_x(it) - target_node.x;
		node_displacement.y = Mesh.get_centroid_y(it) - target_node.y;
		node_displacement.z = Mesh.get_centroid_z(it) - target_node.z;
		double r = node_displacement.Magnitude();

		// target effectively coincides with this centroid (10e-5 == 1e-4):
		// take its values directly to avoid dividing by ~0
		if (r < 10e-5) {
			u = soln.get_u(it);
			v = soln.get_v(it);   // fixed: was 'w = soln.get_v(it)'
			rho = soln.get_rho(it);
			return;
		}

		// inverse-distance-squared weight
		double weight = pow(1 / r, 2.0);
		sum_u = sum_u + weight * soln.get_u(it);
		sum_v = sum_v + weight * soln.get_v(it);
		sum_rho = sum_rho + weight * soln.get_rho(it);
		sum_weights = sum_weights + weight;
	}

	// normalise the weighted sums
	u = sum_u / sum_weights;
	v = sum_v / sum_weights;
	rho = sum_rho / sum_weights;
}
void gpu_solver::find_real_time(double* delta_t_local, double* local_time, bool* calc_face,
	unstructured_mesh &Mesh, bool* calc_cell) {

	// Time-accurate local time-stepping bookkeeping:
	//  1) advance the local clock of every cell that was updated last pass,
	//  2) mark a cell updatable only if its clock is not ahead of any of its
	//     gradient-stencil neighbours,
	//  3) mark a face active if either adjacent cell will be updated.

	// 1) advance local clocks
	for (int i = 0; i < Mesh.get_total_cells(); i++) {
		if (calc_cell[i]) {
			local_time[i] = local_time[i] + delta_t_local[i];
		}
	}

	// 2) a cell waits while any neighbour lags behind it
	for (int i = 0; i < Mesh.get_total_cells(); i++) {
		calc_cell[i] = true;
		for (int j = 0; j < Mesh.gradient_cells[i].size(); j++) {
			const int nb = Mesh.gradient_cells[i][j];
			if (local_time[i] > local_time[nb]) {
				calc_cell[i] = false;
				break;
			}
		}
	}

	// 3) face flux is computed when either owner or neighbour updates
	for (int k = 0; k < Mesh.get_n_faces(); k++) {
		calc_face[k] = calc_cell[Mesh.get_mesh_owner(k)] || calc_cell[Mesh.get_mesh_neighbour(k)];
	}
}
void gpu_solver::post_kernel_checks() {
	// Barrier + error check after kernel launches.  cudaDeviceSynchronize()
	// blocks until all queued device work completes and returns any
	// asynchronous execution error; cudaGetLastError() additionally catches
	// launch-configuration errors that never began executing.  Both are
	// checked here; diagnostics go to stderr (not stdout) and the process
	// aborts, since later CUDA calls would only cascade-fail after a sticky
	// error.
	cudaError_t error = cudaDeviceSynchronize();
	if (error == cudaSuccess) {
		error = cudaGetLastError();
	}
	if (error != cudaSuccess)
	{
		fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}
template <typename T>
void gpu_solver::bcs_to_array(T* target, Boundary_Conditions &bcs, int total_nodes, std::string name) {
	// Copy one integer-valued boundary-condition field ("vel_type" or
	// "rho_type") into a flat array of total_nodes entries.  The selector is
	// resolved once rather than per element; an unknown name leaves target
	// untouched.
	const bool want_vel = (name.compare("vel_type") == 0);
	const bool want_rho = (name.compare("rho_type") == 0);

	for (int i = 0; i < total_nodes; i++) {
		if (want_vel) {
			target[i] = bcs.get_vel_type(i);
		}
		else if (want_rho) {
			target[i] = bcs.get_rho_type(i);
		}
	}
}
template <typename T>
void gpu_solver::bcs_to_array_double(T* target, Boundary_Conditions &bcs, int total_nodes, std::string name) {
	// Pack the boundary-condition values into double4 entries:
	// x = u, y = v, z = w, w = rho.  Only the "bcs" selector is recognised;
	// any other name leaves target untouched.
	if (name.compare("bcs") != 0) {
		return;
	}

	for (int i = 0; i < total_nodes; i++) {
		double4 packed;
		packed.x = bcs.get_u(i);
		packed.y = bcs.get_v(i);
		packed.z = bcs.get_w(i);
		packed.w = bcs.get_rho(i);
		target[i] = packed;
	}
}
void gpu_solver::lagrangian_object_to_array( std::vector<lagrangian_object> &obj_vec, double* &x_ref, double* &y_ref, double* &z_ref, double* &x, double* &y, double* & z,
	double* &x0, double* &y0, double* & z0, int * & tet_connectivity) {

	// Flatten the per-object node coordinates (reference, current and
	// initial positions — note x0/y0/z0 are seeded from the *current*
	// positions, matching the original behaviour) and the tetrahedral
	// connectivity of every lagrangian object into the pre-allocated
	// contiguous arrays.
	int n = 0;   // running write offset into the node arrays
	int t = 0;   // running write offset into the connectivity array
	for (int i = 0; i < obj_vec.size(); i++) {
		for (int j = 0; j < obj_vec[i].num_nodes; j++) {
			x_ref[n] = obj_vec[i].node_x_ref[j];
			x[n] = obj_vec[i].node_x[j];
			x0[n] = obj_vec[i].node_x[j];

			y_ref[n] = obj_vec[i].node_y_ref[j];
			y[n] = obj_vec[i].node_y[j];
			y0[n] = obj_vec[i].node_y[j];

			z_ref[n] = obj_vec[i].node_z_ref[j];
			z[n] = obj_vec[i].node_z[j];
			z0[n] = obj_vec[i].node_z[j];
			n++;
		}

		// Bug fix: the connectivity write index previously restarted at 0
		// for every object, so each object overwrote the previous one's
		// entries even though the array is sized for all objects
		// (total_object_tets * 3).  Use a running offset like the node
		// arrays do.
		// NOTE(review): index *values* are copied verbatim; if each object's
		// connectivity numbers its own nodes from 0, multi-object use may
		// also need the object's node base added here — confirm against the
		// consumers of object_tet_connectivity.
		for (int k = 0; k < obj_vec[i].num_tets * 3; k++) {
			tet_connectivity[t] = obj_vec[i].tet_connectivity[k];
			t++;
		}
	}
}
template <typename T>
void gpu_solver::mesh_to_array(T* target, unstructured_mesh &mesh, int total_nodes, std::string name) {
	// Scatter one named mesh field into a flat array of total_nodes entries.
	// "gradient_stencil" is the exception: it writes six stencil-cell indices
	// per entry, row-major (target[i*6 + j]).  The selector is resolved once
	// instead of per element; an unknown name leaves target untouched.
	if (name.compare("volume") == 0) {
		for (int i = 0; i < total_nodes; i++)
			target[i] = mesh.get_cell_volume(i);
	}
	else if (name.compare("gradient_stencil") == 0) {
		for (int i = 0; i < total_nodes; i++)
			for (int j = 0; j < 6; j++)
				target[i * 6 + j] = mesh.gradient_cells[i][j];
	}
	else if (name.compare("mesh_owner") == 0) {
		for (int i = 0; i < total_nodes; i++)
			target[i] = mesh.get_mesh_owner(i);
	}
	else if (name.compare("mesh_neighbour") == 0) {
		for (int i = 0; i < total_nodes; i++)
			target[i] = mesh.get_mesh_neighbour(i);
	}
	else if (name.compare("surface_area") == 0) {
		for (int i = 0; i < total_nodes; i++)
			target[i] = mesh.get_face_area(i);
	}
	else if (name.compare("streaming_dt") == 0) {
		for (int i = 0; i < total_nodes; i++)
			target[i] = mesh.get_delta_t_face(i);
	}
}
template <typename T>
void gpu_solver::mesh_to_array_double(T* target, unstructured_mesh &mesh, int total_nodes, std::string name)
{
	// Pack one named vector-valued mesh field into double3 entries:
	// "cell_centroid", "face_normal" (i/j/k components) or "face_centroid".
	// The selector is resolved once; an unknown name leaves target untouched.
	const bool cell_centroids = (name.compare("cell_centroid") == 0);
	const bool face_normals = (name.compare("face_normal") == 0);
	const bool face_centroids = (name.compare("face_centroid") == 0);

	for (int i = 0; i < total_nodes; i++) {
		double3 packed;
		if (cell_centroids) {
			packed.x = mesh.get_centroid_x(i);
			packed.y = mesh.get_centroid_y(i);
			packed.z = mesh.get_centroid_z(i);
			target[i] = packed;
		}
		else if (face_normals) {
			packed.x = mesh.get_face_i(i);
			packed.y = mesh.get_face_j(i);
			packed.z = mesh.get_face_k(i);
			target[i] = packed;
		}
		else if (face_centroids) {
			packed.x = mesh.get_face_x(i);
			packed.y = mesh.get_face_y(i);
			packed.z = mesh.get_face_z(i);
			target[i] = packed;
		}
	}
}
template <typename T>
void gpu_solver::gradients_to_array_double(T* target, gradients &grads, int total_nodes, std::string name)
{
	// Pack the least-squares RHS vectors into double3 entries, six per cell,
	// row-major (target[i*6 + j]).  Only the "RHS_array" selector is
	// recognised; any other name leaves target untouched.
	if (name.compare("RHS_array") != 0) {
		return;
	}

	for (int i = 0; i < total_nodes; i++) {
		for (int j = 0; j < 6; j++) {
			const int idx = i * 6 + j;
			double3 rhs;
			rhs.x = double(grads.RHS_x[idx]);
			rhs.y = double(grads.RHS_y[idx]);
			rhs.z = double(grads.RHS_z[idx]);
			target[idx] = rhs;
		}
	}
}
template <typename T>
void gpu_solver::gradients_to_array(T* target, gradients &grads, int total_nodes, std::string name)
{
	// Copy one component of the least-squares LHS matrix ("LHS_xx" ...
	// "LHS_zz") into a flat array.  The field name is resolved to a small
	// code once, so the per-element work is a plain switch; an unknown name
	// leaves target untouched.
	int field = -1;
	if (name.compare("LHS_xx") == 0) field = 0;
	else if (name.compare("LHS_xy") == 0) field = 1;
	else if (name.compare("LHS_xz") == 0) field = 2;
	else if (name.compare("LHS_yx") == 0) field = 3;
	else if (name.compare("LHS_yy") == 0) field = 4;
	else if (name.compare("LHS_yz") == 0) field = 5;
	else if (name.compare("LHS_zx") == 0) field = 6;
	else if (name.compare("LHS_zy") == 0) field = 7;
	else if (name.compare("LHS_zz") == 0) field = 8;

	for (int i = 0; i < total_nodes; i++) {
		switch (field) {
		case 0: target[i] = grads.LHS_xx[i]; break;
		case 1: target[i] = grads.LHS_xy[i]; break;
		case 2: target[i] = grads.LHS_xz[i]; break;
		case 3: target[i] = grads.LHS_yx[i]; break;
		case 4: target[i] = grads.LHS_yy[i]; break;
		case 5: target[i] = grads.LHS_yz[i]; break;
		case 6: target[i] = grads.LHS_zx[i]; break;
		case 7: target[i] = grads.LHS_zy[i]; break;
		case 8: target[i] = grads.LHS_zz[i]; break;
		default: break;
		}
	}
}
void gpu_solver::soln_to_double(double4* target, Solution &soln_a, int total_nodes) {
	// Pack the macroscopic solution into double4 entries for the GPU:
	// x = u, y = v, z = w, w = rho.
	for (int i = 0; i < total_nodes; i++) {
		double4 packed;
		packed.x = soln_a.get_u(i);
		packed.y = soln_a.get_v(i);
		packed.z = soln_a.get_w(i);
		packed.w = soln_a.get_rho(i);
		target[i] = packed;
	}
}
//get CFL numbers for inviscid and viscous matrices
// see what time stepping results
// Fill delta_t_local[] with a per-cell time step limited by the inviscid and
// viscous spectral radii (Blazek-style estimate with Guo (2004)
// preconditioning), then post-process according to globals.time_stepping:
//   "local" - keep the per-cell values,
//   "min"   - every cell gets the global minimum,
//   "talts" - per-cell steps rounded down to power-of-two multiples of the
//             global minimum (multiple stored in delta_t_frequency),
//   other   - constant step globals.time_marching_step for every cell.
// cfl_areas carries the per-cell projected face areas packed as u/v/w.
// The delta_t reference parameter is not written by this function.
void gpu_solver::get_cfl(double &delta_t, Solution &soln
	, unstructured_mesh &Mesh, global_variables &globals, double* delta_t_local, int* delta_t_frequency, Solution &cfl_areas) {

	double factor;
	double area_x_eigen, visc_eigen;
	factor = globals.time_marching_step;   // CFL / user step factor

	double visc_constant;
	visc_constant = 4;   // weighting of the viscous spectral radius

	double min_delta_t, temp;

	double effective_speed_of_sound;
	//effective_speed_of_sound = 1/sqrt(3);
	// pseudo speed of sound of the preconditioned system
	effective_speed_of_sound = globals.max_velocity* sqrt(1 - globals.pre_conditioned_gamma + pow(globals.pre_conditioned_gamma / sqrt(3) / globals.max_velocity, 2));

	//loop through cells
	min_delta_t = 100000000000;   // sentinel: larger than any physical step

	for (int i = 0; i < Mesh.get_n_cells(); i++) {
		delta_t_frequency[i] = 1;
		// eigen values as per Zhaoli guo(2004) - preconditioning

		//estimation of spectral radii s per Jiri Blasek: CFD Principles and Application Determination of Max time Step

		// inviscid (convective) spectral radius projected on the cell areas
		area_x_eigen = 0;
		area_x_eigen = (fabs(soln.get_u(i)) + effective_speed_of_sound)*cfl_areas.get_u(i)
			+ (fabs(soln.get_v(i)) + effective_speed_of_sound)*cfl_areas.get_v(i)
			+ (fabs(soln.get_w(i)) + effective_speed_of_sound)*cfl_areas.get_w(i);

		area_x_eigen = area_x_eigen / globals.pre_conditioned_gamma;

		//reducing preconditioning increases viscous flux - increases eigenvalue
		visc_eigen = 2 * globals.visc / globals.pre_conditioned_gamma / soln.get_rho(i) / Mesh.get_cell_volume(i);
		visc_eigen = visc_eigen * (cfl_areas.get_u(i)*cfl_areas.get_u(i) + cfl_areas.get_v(i)*cfl_areas.get_v(i) + cfl_areas.get_w(i)* cfl_areas.get_w(i));

		area_x_eigen = area_x_eigen + visc_constant * visc_eigen;

		// use smallest time step allowed
		temp = factor * Mesh.get_cell_volume(i) / area_x_eigen;
		// NOTE(review): a negative step is latched into min_delta_t here and
		// then retained, apparently as a poison value (e.g. negative cell
		// volume).  Given the '<' comparison immediately below would catch
		// most negatives anyway, confirm whether this branch is intentional.
		if (temp < 0) {
			min_delta_t = temp;
		}
		if (temp < min_delta_t) {
			min_delta_t = temp;
		}
		if (globals.time_stepping == "local" || globals.time_stepping == "talts") {
			delta_t_local[i] = temp;
		}
		else { //constant user defined time step
			delta_t_local[i] = factor;
		}
	}

	if (globals.time_stepping == "min") {
		std::fill_n(delta_t_local, Mesh.get_n_cells(), min_delta_t);
	}

	if (globals.time_stepping == "talts") {
		for (int i = 0; i < Mesh.get_n_cells(); i++) {
			// largest power of two such that freq * min_delta_t <= the
			// cell's own stable step
			delta_t_frequency[i] = pow(2, floor(log2(delta_t_local[i] / min_delta_t)));
			delta_t_local[i] = min_delta_t * delta_t_frequency[i];
		}
	}

	return;
}
|
67e3486626df228b3c6e856b9069d94922017cf6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "spinKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
spinKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
spinKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
spinKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 67e3486626df228b3c6e856b9069d94922017cf6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "spinKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps (matrix size) x (20 block shapes)
// and times 1000 launches of spinKernel for each combination, printing
// [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
//
// argv[1] = number of entries of matrices_ to sweep.
// NOTE(review): argc/argv[1] are not validated and matrix_len is not clamped
// to the 7 entries of matrices_ -- confirm callers always pass 1..7.
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
// Problem size and launch shape for this configuration.
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
// Round the problem size up to a multiple of the block dimensions so the
// grid covers it exactly.
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces lazy context creation before any timing starts.
cudaFree(0);
// One launch plus a sync, then 10 more warm-up launches (all untimed).
spinKernel<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
spinKernel<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
spinKernel<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
// NOTE(review): kernel launches are asynchronous and there is no
// cudaDeviceSynchronize() before `end`, so the timed section measures
// enqueue time plus whatever work happened to complete, not full kernel
// execution time -- confirm that is intended.
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
8a98ac36bff2a4ee9f5d2ed3a8b28f1d60d0802f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#ifndef GPU_CONVOLUTION
#define GPU_CONVOLUTION
#include <iostream>
// Use the torch for GPU memory management. Thrust resize gives segfulat during
// debugging -g #include <torch/extension.h>
#include "convolution_hip.cuh"
/**
 * Gathered/scattered matrix multiplication (device kernel): for every mapped
 * row y, C[out_map[y]] += A[in_map[y]] * B.
 *
 * A classic BLOCK_SIZE x BLOCK_SIZE shared-memory tiled GEMM, except that
 * the rows of A (input features) are gathered through in_map and the product
 * rows are scattered into C (output features) through out_map -- this applies
 * one spatial offset of a sparse convolution.
 *
 * Expected launch layout: blockDim = (BLOCK_SIZE, BLOCK_SIZE); grid.x tiles
 * the wB output channels, grid.y tiles the hA mapped rows.
 * A is hA x wA, B is hB x wB (both row-major); wA must equal hB.
 * C is accumulated with atomicAdd because several mapped rows may scatter
 * onto the same output row.
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul(const Dtype *A, const int wA, const int hA,
                       const Dtype *B, const int wB, const int hB, Dtype *C,
                       const Itype *in_map, const Itype *out_map) {
  // Use in_feat as A and kernel as B
  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;
  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  // Coordinate. x is for rows, y is for columns.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;
  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;
  // Out-of-range rows read map entry 0 as a harmless placeholder; the final
  // bounds check below keeps them from writing anything.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;
  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];
    // Load the matrices from device memory to shared memory; each thread
    // loads one element of each matrix. Out-of-range elements are
    // zero-padded so the inner product below stays safe.
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * in_row + s + tx] : 0;
    Bs[ty][tx] = ((s + ty) < hB && x < wB) ? B[wB * (s + ty) + x] : 0;
    // Synchronize to make sure the matrices are loaded
    __syncthreads();
    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * Bs[k][tx];
    }
    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
  }
  // Write the block sub-matrix to device memory;
  // each thread writes one element
  if (y < hA && x < wB)
    atomicAdd(&C[wB * out_row + x], Csub);
  // C[wB * out_row + x] += Csub;
}
/**
 * Fused backward GEMMs for one spatial offset of a sparse convolution:
 *   C[in_map[y]] += A[out_map[y]] * B^T   (gradient w.r.t. input features)
 *   E            += D^T * A               (gradient w.r.t. kernel weights)
 * where A = grad_out_feat, B = kernel weight, D = in_feat.
 * wA is A's width and wB is B's width.
 *
 * Expected launch layout: blockDim = (BLOCK_SIZE, BLOCK_SIZE); grid.x tiles
 * the hB input channels, grid.y tiles the hA mapped rows. Both outputs are
 * accumulated with atomicAdd (mapped rows can alias, and E is shared by all
 * blocks).
 *
 *                  +---+
 *                  |B^T|
 *          +-------+---+
 *          |       |   |
 *          |   A   | C |
 *          |       |   |
 *          |       |   |
 * +--------+-------+---+
 * |   D^T  |       | E |
 * +--------+       +---+
 *
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul2(const Dtype *A, const int wA, const int hA,
                        const Dtype *B, const int wB, const int hB,
                        const Dtype *D, const int wD, const int hD, Dtype *C,
                        Dtype *E, const Itype *in_map, const Itype *out_map) {
  // Use grad_out_feat as A, transposed kernel weight as B, and in_feat as D
  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;
  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  // Coordinate. x is for rows, y is for columns.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;
  // Out-of-range rows read map entry 0 as a harmless placeholder; the bounds
  // checks below keep them from contributing to the outputs.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;
  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;
  Dtype Esub = 0;
  // Declaration of the shared memory array As used to
  // store the sub-matrix of A
  __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
  // Declaration of the shared memory array Bs used to
  // store the sub-matrix of B
  __shared__ Dtype BTs[BLOCK_SIZE][BLOCK_SIZE];
  // Declaration of the shared memory array Ds used to
  // store the sub-matrix of D
  __shared__ Dtype DTs[BLOCK_SIZE][BLOCK_SIZE];
  // For Ds = D^T[...:..., ...:...], use the transposed grid dimension for A.
  // Loaded once: D's tile does not depend on the reduction loop below.
  DTs[ty][tx] = (x < wD && y < hD) ? D[wD * in_row + x] : 0;
  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Load the matrices from device memory
    // to shared memory; each thread loads
    // one element of each matrix
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * out_row + s + tx] : 0;
    // Transposed kernel
    BTs[ty][tx] = ((s + ty) < wB && x < hB) ? B[wB * x + s + ty] : 0;
    // Synchronize to make sure the matrices are loaded
    __syncthreads();
    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * BTs[k][tx];
    }
    // For Esub, reset to 0
    Esub = 0;
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Esub += DTs[k][ty] * As[k][tx];
    }
    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
    // For the E matrix which requires accumulation over multiple blocks, use
    // atomic addition. This can be replaced with a more sophisticated
    // reduction algorithm.
    if ((bx * BLOCK_SIZE + ty) < wD && (s + tx) < wA)
      atomicAdd(&E[wA * (bx * BLOCK_SIZE + ty) + (s + tx)], Esub);
  }
  // Write the block sub-matrix to device memory;
  // each thread writes one element
  if (y < hA && x < hB)
    atomicAdd(&C[hB * in_row + x], Csub);
}
// Host-side launcher for the sparse-convolution forward pass.
//
// For each spatial kernel offset k it uploads that offset's in/out index
// maps into the caller-provided scratch buffer d_scr (which must hold at
// least 2 * max map size Itype entries), then launches the gathered matmul
// kernel: out_feat[out_map] += in_feat[in_map] * W_k.
//
// cuhandle is accepted for interface compatibility but is not used on this
// code path. out_nrows is likewise unused here.
template <typename Dtype, typename Itype>
void ConvolutionForwardKernelGPU(
    const Dtype *d_in_feat, int in_nchannel, Dtype *d_out_feat,
    int out_nchannel, const Dtype *d_kernel,
    const std::vector<std::vector<Itype>> &in_maps,
    const std::vector<std::vector<Itype>> &out_maps, int out_nrows,
    Itype *d_scr, hipblasHandle_t cuhandle, hipStream_t stream) {
  // Surface any asynchronous failure left over from earlier kernels first.
  CUDA_CHECK_ARGS(hipDeviceSynchronize(), ". Error triggered from a previous kernel call.");
  // For the in out buffer, use the pre allocated GPU memory space as thrust
  // resize gives segfulat during debugging -g. Also initializing it with
  // torch allows us to allocate memory faster and efficiently.
  int kernel_volume, n_active_in_volume, shared_mem_size = -1;
  Itype *d_in_map, *d_out_map;
  // Copy the in_map, out_map to GPU
  kernel_volume = in_maps.size();
  // Find the max_n_active for memory allocation
  int max_n_active = -1;
  for (int k = 0; k < kernel_volume; k++)
    if (max_n_active < (int)(in_maps[k].size()))
      max_n_active = (int)(in_maps[k].size());
  // Carve the scratch buffer into the in-map and out-map halves.
  d_in_map = d_scr;
  d_out_map = d_in_map + max_n_active;
  // Define the shared memory size: pick the largest GEMM tile the channel
  // counts can keep busy (the value doubles as the tile edge length).
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel > 24 && out_nchannel > 24))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;
  dim3 threads(shared_mem_size, shared_mem_size);
  // Iterate through each spatial kernel and get indices for in_map and out_map
  for (int k = 0; k < kernel_volume; k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;
    // Copy (*p_in_maps)[k] to GPU. Blocking copies make it safe to reuse the
    // same scratch region across iterations. NOTE(review): this assumes the
    // blocking hipMemcpy also orders against work queued on `stream`;
    // confirm `stream` is not a non-blocking stream.
    CUDA_CHECK(hipMemcpy(d_in_map, in_maps[k].data(),
                         sizeof(Itype) * n_active_in_volume,
                         hipMemcpyHostToDevice));
    CUDA_CHECK(hipMemcpy(d_out_map, out_maps[k].data(),
                         sizeof(Itype) * n_active_in_volume,
                         hipMemcpyHostToDevice));
    // Split the mapped rows into num_div chunks so grid.y never exceeds
    // MAX_GRID blocks.
    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;
    Itype *d_curr_in_map = d_in_map, *d_curr_out_map = d_out_map;
    for (int s = 0; s < num_div; s++) {
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((out_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch on the tile size chosen above (the template parameter must
      // be a compile-time constant).
      switch (shared_mem_size) {
      case 32:
        hipLaunchKernelGGL(( matmul<Dtype, Itype, 32>), dim3(grid), dim3(threads), 0, stream, 
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      case 24:
        hipLaunchKernelGGL(( matmul<Dtype, Itype, 24>), dim3(grid), dim3(threads), 0, stream, 
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      case 16:
        hipLaunchKernelGGL(( matmul<Dtype, Itype, 16>), dim3(grid), dim3(threads), 0, stream, 
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      case 8:
        hipLaunchKernelGGL(( matmul<Dtype, Itype, 8>), dim3(grid), dim3(threads), 0, stream, 
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      }
      // Advance to the next chunk of the uploaded maps.
      d_curr_in_map += curr_num_active;
      d_curr_out_map += curr_num_active;
    }
    // Catch launch-configuration errors for this offset's kernels.
    CUDA_CHECK(hipGetLastError());
  }
}
template void ConvolutionForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nchannel, float *d_out_feat,
int out_nchannel, const float *d_kernel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
int32_t *d_scr, hipblasHandle_t cuhandle, hipStream_t stream);
template void ConvolutionForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nchannel, double *d_out_feat,
int out_nchannel, const double *d_kernel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
int32_t *d_scr, hipblasHandle_t cuhandle, hipStream_t stream);
// Host-side launcher for the sparse-convolution backward pass.
//
// For each spatial kernel offset k it uploads that offset's index maps into
// the scratch buffer d_scr (at least 2 * max map size Itype entries) and
// launches the fused matmul2 kernel, which accumulates both
//   d_grad_in_feat[in_map] += d_grad_out_feat[out_map] * W_k^T and
//   d_grad_kernel[k]       += in_feat[in_map]^T * d_grad_out_feat[out_map].
// The gradient buffers are accumulated into, not overwritten.
//
// cuhandle is accepted for interface compatibility but is not used on this
// code path. out_nrows is likewise unused here.
template <typename Dtype, typename Itype>
void ConvolutionBackwardKernelGPU(
    const Dtype *d_in_feat, Dtype *d_grad_in_feat, int in_nchannel,
    const Dtype *d_grad_out_feat, int out_nchannel, const Dtype *d_kernel,
    Dtype *d_grad_kernel, const std::vector<std::vector<Itype>> &in_maps,
    const std::vector<std::vector<Itype>> &out_maps, int out_nrows,
    Itype *d_scr, hipblasHandle_t cuhandle, hipStream_t stream) {
  // Surface any asynchronous failure left over from earlier kernels first.
  CUDA_CHECK_ARGS(hipDeviceSynchronize(), ". Error triggered from a previous kernel call.");
  int kernel_volume, n_active_in_volume, shared_mem_size = -1;
  Itype *d_in_map, *d_out_map;
  kernel_volume = in_maps.size();
  // Find the max_n_active for memory allocation
  int max_n_active = -1;
  for (int k = 0; k < kernel_volume; k++)
    if (max_n_active < (int)(in_maps[k].size()))
      max_n_active = (int)(in_maps[k].size());
  // Carve the scratch buffer into the in-map and out-map halves.
  d_in_map = d_scr;
  d_out_map = d_in_map + max_n_active;
  // Define the shared memory size (tile edge length); note the second clause
  // differs slightly from the forward pass (%32 divisibility here).
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel % 32 == 0 && out_nchannel % 32 == 0))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;
  dim3 threads(shared_mem_size, shared_mem_size);
  for (int k = 0; k < kernel_volume; k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;
    // Copy (*p_in_maps)[k] to GPU. Blocking copies make it safe to reuse the
    // same scratch region across iterations. NOTE(review): this assumes the
    // blocking hipMemcpy also orders against work queued on `stream`;
    // confirm `stream` is not a non-blocking stream.
    CUDA_CHECK(hipMemcpy(d_in_map, in_maps[k].data(),
                         sizeof(Itype) * n_active_in_volume,
                         hipMemcpyHostToDevice));
    CUDA_CHECK(hipMemcpy(d_out_map, out_maps[k].data(),
                         sizeof(Itype) * n_active_in_volume,
                         hipMemcpyHostToDevice));
    // Split the mapped rows into num_div chunks so grid.y never exceeds
    // MAX_GRID blocks.
    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;
    Itype *d_curr_in_map = d_in_map, *d_curr_out_map = d_out_map;
    for (int s = 0; s < num_div; s++) {
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((in_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch on the tile size chosen above (the template parameter must
      // be a compile-time constant).
      switch (shared_mem_size) {
      case 32:
        hipLaunchKernelGGL(( matmul2<Dtype, Itype, 32>), dim3(grid), dim3(threads), 0, stream, 
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat, // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      case 24:
        hipLaunchKernelGGL(( matmul2<Dtype, Itype, 24>), dim3(grid), dim3(threads), 0, stream, 
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat, // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      case 16:
        hipLaunchKernelGGL(( matmul2<Dtype, Itype, 16>), dim3(grid), dim3(threads), 0, stream, 
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat, // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      case 8:
        hipLaunchKernelGGL(( matmul2<Dtype, Itype, 8>), dim3(grid), dim3(threads), 0, stream, 
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat, // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      }
      // Advance to the next chunk of the uploaded maps.
      d_curr_in_map += curr_num_active;
      d_curr_out_map += curr_num_active;
    }
    // Catch launch-configuration errors for this offset's kernels.
    CUDA_CHECK(hipGetLastError());
  }
}
template void ConvolutionBackwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_grad_in_feat, int in_nchannel,
const float *d_grad_out_feat, int out_nchannel, const float *d_kernel,
float *p_grad_kernel, const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
int32_t *d_scr, hipblasHandle_t cuhandle, hipStream_t stream);
template void ConvolutionBackwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_grad_in_feat, int in_nchannel,
const double *d_grad_out_feat, int out_nchannel, const double *d_kernel,
double *p_grad_kernel, const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
int32_t *d_scr, hipblasHandle_t cuhandle, hipStream_t stream);
#endif
| 8a98ac36bff2a4ee9f5d2ed3a8b28f1d60d0802f.cu | /* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#ifndef GPU_CONVOLUTION
#define GPU_CONVOLUTION
#include <iostream>
// Use the torch for GPU memory management. Thrust resize gives segfulat during
// debugging -g #include <torch/extension.h>
#include "convolution.cuh"
/**
 * Gathered/scattered matrix multiplication (CUDA kernel): for every mapped
 * row y, C[out_map[y]] += A[in_map[y]] * B.
 *
 * A classic BLOCK_SIZE x BLOCK_SIZE shared-memory tiled GEMM, except that
 * the rows of A (input features) are gathered through in_map and the product
 * rows are scattered into C (output features) through out_map -- this applies
 * one spatial offset of a sparse convolution.
 *
 * Expected launch layout: blockDim = (BLOCK_SIZE, BLOCK_SIZE); grid.x tiles
 * the wB output channels, grid.y tiles the hA mapped rows.
 * A is hA x wA, B is hB x wB (both row-major); wA must equal hB.
 * C is accumulated with atomicAdd because several mapped rows may scatter
 * onto the same output row.
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul(const Dtype *A, const int wA, const int hA,
                       const Dtype *B, const int wB, const int hB, Dtype *C,
                       const Itype *in_map, const Itype *out_map) {
  // Use in_feat as A and kernel as B
  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;
  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  // Coordinate. x is for rows, y is for columns.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;
  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;
  // Out-of-range rows read map entry 0 as a harmless placeholder; the final
  // bounds check below keeps them from writing anything.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;
  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];
    // Load the matrices from device memory to shared memory; each thread
    // loads one element of each matrix. Out-of-range elements are
    // zero-padded so the inner product below stays safe.
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * in_row + s + tx] : 0;
    Bs[ty][tx] = ((s + ty) < hB && x < wB) ? B[wB * (s + ty) + x] : 0;
    // Synchronize to make sure the matrices are loaded
    __syncthreads();
    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * Bs[k][tx];
    }
    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
  }
  // Write the block sub-matrix to device memory;
  // each thread writes one element
  if (y < hA && x < wB)
    atomicAdd(&C[wB * out_row + x], Csub);
  // C[wB * out_row + x] += Csub;
}
/**
 * Fused backward GEMMs for one spatial offset of a sparse convolution:
 *   C[in_map[y]] += A[out_map[y]] * B^T   (gradient w.r.t. input features)
 *   E            += D^T * A               (gradient w.r.t. kernel weights)
 * where A = grad_out_feat, B = kernel weight, D = in_feat.
 * wA is A's width and wB is B's width.
 *
 * Expected launch layout: blockDim = (BLOCK_SIZE, BLOCK_SIZE); grid.x tiles
 * the hB input channels, grid.y tiles the hA mapped rows. Both outputs are
 * accumulated with atomicAdd (mapped rows can alias, and E is shared by all
 * blocks).
 *
 *                  +---+
 *                  |B^T|
 *          +-------+---+
 *          |       |   |
 *          |   A   | C |
 *          |       |   |
 *          |       |   |
 * +--------+-------+---+
 * |   D^T  |       | E |
 * +--------+       +---+
 *
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul2(const Dtype *A, const int wA, const int hA,
                        const Dtype *B, const int wB, const int hB,
                        const Dtype *D, const int wD, const int hD, Dtype *C,
                        Dtype *E, const Itype *in_map, const Itype *out_map) {
  // Use grad_out_feat as A, transposed kernel weight as B, and in_feat as D
  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;
  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  // Coordinate. x is for rows, y is for columns.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;
  // Out-of-range rows read map entry 0 as a harmless placeholder; the bounds
  // checks below keep them from contributing to the outputs.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;
  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;
  Dtype Esub = 0;
  // Declaration of the shared memory array As used to
  // store the sub-matrix of A
  __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
  // Declaration of the shared memory array Bs used to
  // store the sub-matrix of B
  __shared__ Dtype BTs[BLOCK_SIZE][BLOCK_SIZE];
  // Declaration of the shared memory array Ds used to
  // store the sub-matrix of D
  __shared__ Dtype DTs[BLOCK_SIZE][BLOCK_SIZE];
  // For Ds = D^T[...:..., ...:...], use the transposed grid dimension for A.
  // Loaded once: D's tile does not depend on the reduction loop below.
  DTs[ty][tx] = (x < wD && y < hD) ? D[wD * in_row + x] : 0;
  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Load the matrices from device memory
    // to shared memory; each thread loads
    // one element of each matrix
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * out_row + s + tx] : 0;
    // Transposed kernel
    BTs[ty][tx] = ((s + ty) < wB && x < hB) ? B[wB * x + s + ty] : 0;
    // Synchronize to make sure the matrices are loaded
    __syncthreads();
    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * BTs[k][tx];
    }
    // For Esub, reset to 0
    Esub = 0;
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Esub += DTs[k][ty] * As[k][tx];
    }
    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
    // For the E matrix which requires accumulation over multiple blocks, use
    // atomic addition. This can be replaced with a more sophisticated
    // reduction algorithm.
    if ((bx * BLOCK_SIZE + ty) < wD && (s + tx) < wA)
      atomicAdd(&E[wA * (bx * BLOCK_SIZE + ty) + (s + tx)], Esub);
  }
  // Write the block sub-matrix to device memory;
  // each thread writes one element
  if (y < hA && x < hB)
    atomicAdd(&C[hB * in_row + x], Csub);
}
// Host-side launcher for the sparse-convolution forward pass (CUDA path).
//
// For each spatial kernel offset k it uploads that offset's in/out index
// maps into the caller-provided scratch buffer d_scr (which must hold at
// least 2 * max map size Itype entries), then launches the gathered matmul
// kernel: out_feat[out_map] += in_feat[in_map] * W_k.
//
// cuhandle is accepted for interface compatibility but is not used on this
// code path. out_nrows is likewise unused here.
template <typename Dtype, typename Itype>
void ConvolutionForwardKernelGPU(
    const Dtype *d_in_feat, int in_nchannel, Dtype *d_out_feat,
    int out_nchannel, const Dtype *d_kernel,
    const std::vector<std::vector<Itype>> &in_maps,
    const std::vector<std::vector<Itype>> &out_maps, int out_nrows,
    Itype *d_scr, cublasHandle_t cuhandle, cudaStream_t stream) {
  // Surface any asynchronous failure left over from earlier kernels first.
  CUDA_CHECK_ARGS(cudaDeviceSynchronize(), ". Error triggered from a previous kernel call.");
  // For the in out buffer, use the pre allocated GPU memory space as thrust
  // resize gives segfault. Also initializing it with torch allows us to
  // allocate memory faster and efficiently.
  int kernel_volume, n_active_in_volume, shared_mem_size = -1;
  Itype *d_in_map, *d_out_map;
  // Copy the in_map, out_map to GPU
  kernel_volume = in_maps.size();
  // Find the max_n_active for memory allocation
  int max_n_active = -1;
  for (int k = 0; k < kernel_volume; k++)
    if (max_n_active < (int)(in_maps[k].size()))
      max_n_active = (int)(in_maps[k].size());
  // Carve the scratch buffer into the in-map and out-map halves.
  d_in_map = d_scr;
  d_out_map = d_in_map + max_n_active;
  // Define the shared memory size: pick the largest GEMM tile the channel
  // counts can keep busy (the value doubles as the tile edge length).
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel > 24 && out_nchannel > 24))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;
  dim3 threads(shared_mem_size, shared_mem_size);
  // Iterate through each spatial kernel and get indices for in_map and out_map
  for (int k = 0; k < kernel_volume; k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;
    // Copy (*p_in_maps)[k] to GPU. Blocking copies make it safe to reuse the
    // same scratch region across iterations. NOTE(review): this assumes the
    // blocking cudaMemcpy also orders against work queued on `stream`;
    // confirm `stream` is not a non-blocking stream.
    CUDA_CHECK(cudaMemcpy(d_in_map, in_maps[k].data(),
                          sizeof(Itype) * n_active_in_volume,
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_out_map, out_maps[k].data(),
                          sizeof(Itype) * n_active_in_volume,
                          cudaMemcpyHostToDevice));
    // Split the mapped rows into num_div chunks so grid.y never exceeds
    // MAX_GRID blocks.
    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;
    Itype *d_curr_in_map = d_in_map, *d_curr_out_map = d_out_map;
    for (int s = 0; s < num_div; s++) {
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((out_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch on the tile size chosen above (the template parameter must
      // be a compile-time constant).
      switch (shared_mem_size) {
      case 32:
        matmul<Dtype, Itype, 32><<<grid, threads, 0, stream>>>(
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      case 24:
        matmul<Dtype, Itype, 24><<<grid, threads, 0, stream>>>(
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      case 16:
        matmul<Dtype, Itype, 16><<<grid, threads, 0, stream>>>(
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      case 8:
        matmul<Dtype, Itype, 8><<<grid, threads, 0, stream>>>(
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, d_curr_in_map, d_curr_out_map);
        break;
      }
      // Advance to the next chunk of the uploaded maps.
      d_curr_in_map += curr_num_active;
      d_curr_out_map += curr_num_active;
    }
    // Catch launch-configuration errors for this offset's kernels.
    CUDA_CHECK(cudaGetLastError());
  }
}
template void ConvolutionForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nchannel, float *d_out_feat,
int out_nchannel, const float *d_kernel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
int32_t *d_scr, cublasHandle_t cuhandle, cudaStream_t stream);
template void ConvolutionForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nchannel, double *d_out_feat,
int out_nchannel, const double *d_kernel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
int32_t *d_scr, cublasHandle_t cuhandle, cudaStream_t stream);
/**
 * Host-side launcher for the backward pass of the (sparse) convolution.
 *
 * For every kernel offset k with a non-empty index map, copies the k-th
 * in/out maps to device scratch memory and launches the matmul2 kernel with
 * d_grad_out_feat, the k-th kernel slice (&d_kernel[k * in_nchannel *
 * out_nchannel]) and d_in_feat as inputs, writing into d_grad_in_feat and
 * d_grad_kernel through the copied index maps.
 *
 * @param d_scr   device scratch buffer; must hold at least
 *                2 * max_k(in_maps[k].size()) Itype elements (in + out map)
 * @param stream  CUDA stream all memcpys in this function are NOT issued on
 *                (plain cudaMemcpy is synchronous) but all kernels are
 *
 * NOTE(review): cuhandle and out_nrows are accepted but unused in this path.
 */
template <typename Dtype, typename Itype>
void ConvolutionBackwardKernelGPU(
    const Dtype *d_in_feat, Dtype *d_grad_in_feat, int in_nchannel,
    const Dtype *d_grad_out_feat, int out_nchannel, const Dtype *d_kernel,
    Dtype *d_grad_kernel, const std::vector<std::vector<Itype>> &in_maps,
    const std::vector<std::vector<Itype>> &out_maps, int out_nrows,
    Itype *d_scr, cublasHandle_t cuhandle, cudaStream_t stream) {
  // Surface any asynchronous error from previously launched kernels early.
  CUDA_CHECK_ARGS(cudaDeviceSynchronize(), ". Error triggered from a previous kernel call.");
  int kernel_volume, n_active_in_volume, shared_mem_size = -1;
  Itype *d_in_map, *d_out_map;
  kernel_volume = in_maps.size();

  // Find the max_n_active for memory allocation
  int max_n_active = -1;
  for (int k = 0; k < kernel_volume; k++)
    if (max_n_active < (int)(in_maps[k].size()))
      max_n_active = (int)(in_maps[k].size());

  // Split the scratch buffer: first half for in maps, second half for out maps.
  d_in_map = d_scr;
  d_out_map = d_in_map + max_n_active;

  // Define the shared memory size (tile edge used by matmul2); pick the
  // largest tile compatible with the channel counts.
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel % 32 == 0 && out_nchannel % 32 == 0))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;

  dim3 threads(shared_mem_size, shared_mem_size);

  for (int k = 0; k < kernel_volume; k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;

    // Copy (*p_in_maps)[k] to GPU
    CUDA_CHECK(cudaMemcpy(d_in_map, in_maps[k].data(),
                          sizeof(Itype) * n_active_in_volume,
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_out_map, out_maps[k].data(),
                          sizeof(Itype) * n_active_in_volume,
                          cudaMemcpyHostToDevice));

    // Split the rows into num_div chunks so that no launch exceeds the
    // MAX_GRID block-count limit along the grid's y dimension.
    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;
    Itype *d_curr_in_map = d_in_map, *d_curr_out_map = d_out_map;
    for (int s = 0; s < num_div; s++) {
      // Last chunk may be shorter than 'step'.
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((in_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch on the compile-time tile-size template parameter.
      switch (shared_mem_size) {
      case 32:
        matmul2<Dtype, Itype, 32><<<grid, threads, 0, stream>>>(
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                             // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat,                          // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      case 24:
        matmul2<Dtype, Itype, 24><<<grid, threads, 0, stream>>>(
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                             // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat,                          // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      case 16:
        matmul2<Dtype, Itype, 16><<<grid, threads, 0, stream>>>(
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                             // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat,                          // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      case 8:
        matmul2<Dtype, Itype, 8><<<grid, threads, 0, stream>>>(
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                             // B
            d_in_feat, in_nchannel, curr_num_active, // D
            d_grad_in_feat,                          // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            d_curr_in_map, d_curr_out_map);
        break;
      }
      // Advance map pointers past the rows this chunk consumed.
      d_curr_in_map += curr_num_active;
      d_curr_out_map += curr_num_active;
    }
    // Catch launch-configuration errors for this kernel offset.
    CUDA_CHECK(cudaGetLastError());
  }
}
// Explicit template instantiations of the backward convolution launcher for
// float/double features with 32-bit integer in/out index maps.
template void ConvolutionBackwardKernelGPU<float, int32_t>(
    const float *d_in_feat, float *d_grad_in_feat, int in_nchannel,
    const float *d_grad_out_feat, int out_nchannel, const float *d_kernel,
    float *p_grad_kernel, const std::vector<std::vector<int32_t>> &in_map,
    const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
    int32_t *d_scr, cublasHandle_t cuhandle, cudaStream_t stream);

template void ConvolutionBackwardKernelGPU<double, int32_t>(
    const double *d_in_feat, double *d_grad_in_feat, int in_nchannel,
    const double *d_grad_out_feat, int out_nchannel, const double *d_kernel,
    double *p_grad_kernel, const std::vector<std::vector<int32_t>> &in_map,
    const std::vector<std::vector<int32_t>> &out_map, int out_nrows,
    int32_t *d_scr, cublasHandle_t cuhandle, cudaStream_t stream);
#endif
|
2aabfcdc5277f911b6cfcc5d8c43854957b827a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_encoder.h"
#include <libgpujpeg/gpujpeg_util.h>
#define WARPS_NUM 8
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_encoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
/** Size of occupied part of output buffer */
__device__ unsigned int gpujpeg_huffman_output_byte_count;
/**
* Huffman coding tables in constant memory - each has 257 items (256 + 1 extra)
* There are are 4 of them - one after another, in following order:
* - luminance (Y) AC
* - luminance (Y) DC
* - chroma (cb/cr) AC
* - chroma (cb/cr) DC
*/
__device__ uint32_t gpujpeg_huffman_gpu_lut[(256 + 1) * 4];
/**
* Value decomposition in constant memory (input range from -4096 to 4095 ... both inclusive)
* Mapping from coefficient value into the code for the value ind its bit size.
*/
__device__ unsigned int gpujpeg_huffman_value_decomposition[8 * 1024];
/**
 * Initializes the coefficient decomposition table in global memory. (CC >= 2.0)
 * Each thread handles one input value from [-4096, 4095] and stores a packed
 * word: the value's JPEG bit-code left-aligned in the MSBs and the bit count
 * of that code in the LSBs.
 * Launch: exactly one thread per table entry (8 * 1024 threads total).
 */
__global__ static void
gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel() {
    // fetch this thread's value (table index -> signed coefficient)
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int value = tid - 4096;

    // decompose it
    unsigned int value_code = value;
    int absolute = value;
    if ( value < 0 ) {
        // absolute is now the abs value of the input
        absolute = -absolute;
        // For a negative input, want value_code = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        value_code--;
    }

    // Find the number of bits needed for the magnitude of the coefficient
    unsigned int value_nbits = 0;
    while ( absolute ) {
        value_nbits++;
        absolute >>= 1;
    }

    // Pack the result (code bits left-aligned in MSBs, bit size in LSBs).
    // The zero-bit case must be guarded explicitly: "value_code << 32" is
    // undefined behavior in C++ (shift count equal to the type width), and
    // for value == 0 the stored word must simply be 0.
    gpujpeg_huffman_value_decomposition[tid] =
            value_nbits ? (value_nbits | (value_code << (32 - value_nbits)))
                        : 0;
}
#if __CUDA_ARCH__ >= 200
/**
 * Adds up to 32 bits at once into the output buffer, applying JPEG byte
 * stuffing (a zero byte after each emitted 0xFF).
 * Codeword value must be aligned to left (most significant bits). (CC >= 2.0)
 *
 * @param remaining_bits   pending bits, left-aligned in 32 bits (in/out)
 * @param byte_count       bytes already written to out_ptr (in/out)
 * @param bit_count        number of valid bits in remaining_bits (in/out)
 * @param out_ptr          output byte buffer
 * @param packed_code_word codeword bits in the 27 MSBs, bit length in 5 LSBs
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int & remaining_bits, int & byte_count, int & bit_count, uint8_t * const out_ptr, const unsigned int packed_code_word)
{
    // decompose packed codeword into the msb-aligned value and bit-length of the value
    const unsigned int code_word = packed_code_word & ~31;
    const unsigned int code_bit_size = packed_code_word & 31;

    // concatenate with remaining bits
    remaining_bits |= code_word >> bit_count;
    bit_count += code_bit_size;

    // flush some bytes if have more than 8 bits
    if (bit_count >= 8) {
        do {
            const unsigned int out_byte = remaining_bits >> 24;
            out_ptr[byte_count++] = out_byte;
            if(0xff == out_byte) {
                // keep zero byte after each 0xFF (buffer is expected to be zeroed)
                out_ptr[byte_count++] = 0;
            }
            remaining_bits <<= 8;
            bit_count -= 8;
        } while (bit_count >= 8);

        // Recompute the leftover bits from the codeword itself: the initial
        // "code_word >> bit_count" above drops trailing codeword bits when
        // the old bit_count plus code_bit_size exceeded 32; this restores
        // them and masks everything beyond the valid bit_count bits.
        remaining_bits = code_word << (code_bit_size - bit_count);
        remaining_bits &= 0xfffffffe << (31 - bit_count);
    }
}
/**
 * Given some huffman table offset, RLE zero count and coefficient value,
 * this returns huffman codeword for the value (packed in 27 MSBs)
 * together with its bit size (in 5 LSBs). (CC >= 2.0)
 *
 * @param preceding_zero_count  run length of zeros before the value; used
 *                              (scaled by 16) as a row index into the LUT
 * @param coefficient           quantized coefficient in [-4096, 4095]
 * @param huffman_lut_offset    offset of the table in gpujpeg_huffman_gpu_lut
 */
__device__ static unsigned int
gpujpeg_huffman_gpu_encode_value(const int preceding_zero_count, const int coefficient,
                                 const int huffman_lut_offset)
{
    // value bits are in MSBs (left aligned) and bit size of the value is in LSBs (right aligned)
    const unsigned int packed_value = gpujpeg_huffman_value_decomposition[4096 + coefficient];

    // decompose value info into upshifted value and value's bit size
    // (4-bit mask suffices: magnitudes in [-4096, 4095] need at most 13 bits)
    const int value_nbits = packed_value & 0xf;
    const unsigned int value_code = packed_value & ~0xf;

    // find prefix of the codeword and size of the prefix
    const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits;
    const unsigned int packed_prefix = gpujpeg_huffman_gpu_lut[huffman_lut_idx];
    const unsigned int prefix_nbits = packed_prefix & 31;

    // compose packed codeword with its size: the prefix already carries its
    // own bit count in the 5 LSBs, so adding value_nbits yields the total
    // size; the value bits are appended right after the prefix bits
    return (packed_prefix + value_nbits) | (value_code >> prefix_nbits);
}
/**
 * Flush remaining codewords from buffer in shared memory to global memory
 * output buffer. (CC >= 2.0)
 * Must be executed by all 32 threads of the warp: each thread stores one
 * uint4 (= 4 codewords), so this handles up to 4 * 32 remaining codewords.
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_flush_codewords(unsigned int * const s_out, unsigned int * &data_compressed, int & remaining_codewords, const int tid) {
    // this works for up to 4 * 32 remaining codewords
    if(remaining_codewords) {
        // pad remaining codewords with extra zero-sized codewords, not to have to use special case in serialization kernel, which saves 4 codewords at once
        s_out[remaining_codewords + tid] = 0;

        // save all remaining codewords at once (together with some zero sized padding codewords)
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];

        // update codeword counter (note: data_compressed was pre-offset by
        // the caller with tid * 4, so advancing by the codeword count keeps
        // each lane's view consistent)
        data_compressed += remaining_codewords;
        remaining_codewords = 0;
    }
}
/**
 * Encode one 8x8 block (CC >= 2.0)
 *
 * One warp per block: each thread handles one pair of coefficients (in
 * zigzag order). Uses legacy mask-less __ballot/__clz warp intrinsics.
 * NOTE(review): relies on implicit warp synchrony (pre-Volta semantics);
 * confirm behavior under independent thread scheduling (CC >= 7.0).
 *
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(const int16_t * block, unsigned int * &data_compressed, unsigned int * const s_out,
                                         int & remaining_codewords, const int last_dc_idx, int tid, const int huffman_lut_offset)
{
    // each thread loads a pair of values (pair after zigzag reordering)
    const int load_idx = tid * 2;
    int in_even = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx]];
    const int in_odd = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx + 1]];

    // compute the count of consecutive zero coefficients preceding the even
    // coefficient (the count is later scaled by 16 inside
    // gpujpeg_huffman_gpu_encode_value when indexing the Huffman LUT)
    const unsigned int nonzero_mask = (1 << tid) - 1;
    const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC is always treated as nonzero
    const unsigned int nonzero_bitmap_1 = __ballot(in_odd);
    const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1;
    const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask);
    int zeros_before_even = 2 * (zero_pair_count + tid - 32);
    // add one more zero if the run starts at an odd position
    if((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) {
        zeros_before_even += 1;
    }

    // true if any nonzero pixel follows thread's odd pixel
    const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask;

    // count of consecutive zeros before odd value (either one more than
    // even if even is zero or none if even value itself is nonzero)
    int zeros_before_odd = in_even || !tid ? 0 : zeros_before_even + 1;

    // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited)
    // otherwise only trim extra bits from the counts of following zeros
    const int zero_count_mask = nonzero_follows ? 0xF : 0;
    zeros_before_even &= zero_count_mask;
    zeros_before_odd &= zero_count_mask;

    // pointer to LUT for encoding thread's even value
    // (only thread #0 uses DC table, others use AC table)
    int even_lut_offset = huffman_lut_offset;

    // first thread handles special DC coefficient
    if(0 == tid) {
        // first thread uses DC part of the table for its even value
        even_lut_offset += 256 + 1;

        // update last DC coefficient (saved at the special place at the end of the shared bufer)
        const int original_in_even = in_even;
        in_even -= ((int*)s_out)[last_dc_idx];
        ((int*)s_out)[last_dc_idx] = original_in_even;
    }

    // last thread handles special block-termination symbol
    if(0 == ((tid ^ 31) | in_odd)) {
        // this causes selection of huffman symbol at index 256 (which contains the termination symbol)
        zeros_before_odd = 16;
    }

    // each thread gets codeword for its two pixels
    unsigned int even_code = gpujpeg_huffman_gpu_encode_value(zeros_before_even, in_even, even_lut_offset);
    unsigned int odd_code = gpujpeg_huffman_gpu_encode_value(zeros_before_odd, in_odd, huffman_lut_offset);

    // concatenate both codewords into one if they are short enough
    // (27 bits of payload + 5 bits of size fit into one packed word)
    const unsigned int even_code_size = even_code & 31;
    const unsigned int odd_code_size = odd_code & 31;
    const unsigned int total_size = even_code_size + odd_code_size;
    if(total_size <= 27) {
        even_code = total_size | ((odd_code & ~31) >> even_code_size) | (even_code & ~31);
        odd_code = 0;
    }

    // each thread get number of preceding nonzero codewords and total number of nonzero codewords in this block
    const unsigned int even_codeword_presence = __ballot(even_code);
    const unsigned int odd_codeword_presence = __ballot(odd_code);
    const int codeword_offset = __popc(nonzero_mask & even_codeword_presence)
                              + __popc(nonzero_mask & odd_codeword_presence);

    // each thread saves its values into temporary shared buffer
    if(even_code) {
        s_out[remaining_codewords + codeword_offset] = even_code;
        if(odd_code) {
            s_out[remaining_codewords + codeword_offset + 1] = odd_code;
        }
    }

    // advance count of codewords in shared memory buffer
    remaining_codewords += __popc(odd_codeword_presence) + __popc(even_codeword_presence);

    // flush some codewords to global memory if there are too many of them in shared buffer
    const int flush_count = 32 * 4; // = half of the buffer
    if(remaining_codewords > flush_count) {
        // move first half of the buffer into output buffer in global memory and update output pointer
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        data_compressed += flush_count;

        // shift remaining codewords to begin of the buffer and update their count
        ((uint4*)s_out)[tid] = ((uint4*)s_out)[flush_count / 4 + tid]; // 4 for 4 uints in uint4
        remaining_codewords -= flush_count;
    }

    // nothing to fail here
    return 0;
}
#endif // #if __CUDA_ARCH__ >= 200
/**
 * Huffman encoder kernel (For compute capability >= 2.0)
 *
 * One warp per segment (WARPS_NUM warps per thread block). Each warp's
 * shared buffer is 65 uint4 = 260 uints: 256 codeword slots plus slots
 * 256..259 used for the per-component last-DC values.
 *
 * @tparam CONTINUOUS_BLOCK_LIST  true when all blocks of a segment are
 *         contiguous in one component (non-interleaved scan); false when
 *         blocks are addressed through d_block_list
 * @return void
 */
template <bool CONTINUOUS_BLOCK_LIST>
#if __CUDA_ARCH__ >= 200
__launch_bounds__(WARPS_NUM * 32, 1024 / (WARPS_NUM * 32))
#endif
__global__ static void
gpujpeg_huffman_encoder_encode_kernel_warp(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    uint8_t* d_data_compressed,
    const uint64_t* const d_block_list,
    int16_t* const d_data_quantized,
    struct gpujpeg_component* const d_component,
    const int comp_count
) {
#if __CUDA_ARCH__ >= 200
    int warpidx = threadIdx.x >> 5;   // warp index within the thread block
    int tid = threadIdx.x & 31;       // lane index within the warp

    __shared__ uint4 s_out_all[(64 + 1) * WARPS_NUM];
    unsigned int * s_out = (unsigned int*)(s_out_all + warpidx * (64 + 1));

    // Number of remaining codewords in shared buffer
    int remaining_codewords = 0;

    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_index = block_idx * WARPS_NUM + warpidx;

    // first thread initializes compact output size for next kernel
    if(0 == tid && 0 == warpidx && 0 == block_idx) {
        gpujpeg_huffman_output_byte_count = 0;
    }

    // stop if out of segment bounds
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];

    // Initialize last DC coefficients (up to 3 components)
    if(tid < 3) {
        s_out[256 + tid] = 0;
    }

    // Prepare data pointers
    unsigned int * data_compressed = (unsigned int*)(d_data_compressed + segment->data_temp_index);
    unsigned int * data_compressed_start = data_compressed;

    // Pre-add thread ID to output pointer (it's always used only with it)
    data_compressed += (tid * 4);

    // Encode all block in segment
    if(CONTINUOUS_BLOCK_LIST) {
        // Get component for current scan
        const struct gpujpeg_component* component = &d_component[segment->scan_index];

        // mcu size of the component
        const int comp_mcu_size = component->mcu_size;

        // Get component data for MCU (first block)
        const int16_t* block = component->d_data_quantized + (segment->scan_segment_index * component->segment_mcu_count) * comp_mcu_size;

        // Get huffman table offset
        const int huffman_table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0 : (256 + 1) * 2; // possibly skips luminance tables

        // Encode MCUs in segment
        for (int block_count = segment->mcu_count; block_count--;) {
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, 256, tid, huffman_table_offset);

            // Advance to next block
            block += comp_mcu_size;
        }
    } else {
        // Pointer to segment's list of 8x8 blocks and their count
        const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;

        // Encode all blocks
        for(int block_count = segment->block_count; block_count--;) {
            // Get pointer to next block input data and info about its color type
            const uint64_t packed_block_info = *(packed_block_info_ptr++);

            // Get coder parameters (shared-buffer slot for this component's last DC)
            const int last_dc_idx = 256 + (packed_block_info & 0x7f);

            // Get offset to right part of huffman table
            const int huffman_table_offset = packed_block_info & 0x80 ? (256 + 1) * 2 : 0; // possibly skips luminance tables

            // Source data pointer
            int16_t* block = &d_data_quantized[packed_block_info >> 8];

            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, last_dc_idx, tid, huffman_table_offset);
        }
    }

    // flush remaining codewords
    gpujpeg_huffman_gpu_encoder_flush_codewords(s_out, data_compressed, remaining_codewords, tid);

    // Set number of codewords (lane 0's pointer was pre-offset by 0, so the
    // difference is the codeword count of the whole segment).
    if (tid == 0 ) {
        segment->data_compressed_size = data_compressed - data_compressed_start;
    }
#endif // #if __CUDA_ARCH__ >= 200
}
#define SERIALIZATION_THREADS_PER_TBLOCK 192
/**
 * Codeword serialization kernel (CC >= 2.0).
 *
 * One thread per segment: reads the segment's packed codewords (4 per uint4,
 * as produced by the encoding kernel; "data_compressed_size" holds the
 * codeword count at this point), serializes them into a byte stream with
 * byte stuffing, pads the tail bits with ones and appends a restart marker.
 *
 * @return void
 */
#if __CUDA_ARCH__ >= 200
__launch_bounds__(SERIALIZATION_THREADS_PER_TBLOCK, 1536 / SERIALIZATION_THREADS_PER_TBLOCK)
#endif
__global__ static void
gpujpeg_huffman_encoder_serialization_kernel(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest
) {
#if __CUDA_ARCH__ >= 200
    // Temp buffer for all threads of the threadblock
    __shared__ uint4 s_temp_all[2 * SERIALIZATION_THREADS_PER_TBLOCK];

    // Thread's 32 bytes in shared memory for output composition
    uint4 * const s_temp = s_temp_all + threadIdx.x * 2;

    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    int segment_index = block_idx * SERIALIZATION_THREADS_PER_TBLOCK + threadIdx.x;
    if ( segment_index >= segment_count )
        return;

    // Thread's segment
    struct gpujpeg_segment* const segment = &d_segment[segment_index];

    // Input and output pointers (in-place: same offset in src and dest buffers)
    const int data_offset = segment->data_temp_index;
    uint4 * const d_dest_stream_start = (uint4*)(d_dest + data_offset);
    uint4 * d_dest_stream = d_dest_stream_start;
    const uint4 * d_src_codewords = (uint4*)(d_src + data_offset);

    // number of bytes in the temp buffer, remaining bits and their count
    int byte_count = 0, bit_count = 0;
    unsigned int remaining_bits = 0;

    // "data_compressed_size" is now initialized to number of codewords to be serialized
    for(int cword_tuple_count = (segment->data_compressed_size + 3) >> 2; cword_tuple_count--; ) // reading 4 codewords at once
    {
        // read 4 codewords and advance input pointer to next ones
        const uint4 cwords = *(d_src_codewords++);

        // encode first pair of codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.x);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.y);

        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];

            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];

            // update number of remaining bytes
            byte_count -= 16;
        }

        // encode other two codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.z);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.w);

        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];

            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];

            // update number of remaining bytes
            byte_count -= 16;
        }
    }

    // Emit left bits (0xfe000007 = seven left-aligned 1-bits with size 7:
    // pads the stream to a byte boundary)
    gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, 0xfe000007);

    // Terminate codestream with restart marker
    ((uint8_t*)s_temp)[byte_count + 0] = 0xFF;
    ((uint8_t*)s_temp)[byte_count + 1] = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);

    // flush remaining bytes
    d_dest_stream[0] = s_temp[0];
    d_dest_stream[1] = s_temp[1];

    // Set compressed size (in bytes now: full 16-byte chunks + tail + marker)
    segment->data_compressed_size = (d_dest_stream - d_dest_stream_start) * 16 + byte_count + 2;
#endif // #if __CUDA_ARCH__ >= 200
}
/**
 * Huffman coder compact output allocation kernel - serially reserves
 * some space for compressed output of segments in output buffer.
 * (For CC 1.0 - a workaround for missing atomic operations.)
 *
 * Only single threadblock with 512 threads is launched.
 */
__global__ static void
gpujpeg_huffman_encoder_allocation_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count
) {
    // offsets of segments
    __shared__ unsigned int s_segment_offsets[512];

    // cumulative sum of bytes of all segments
    unsigned int total_byte_count = 0;

    // iterate over all segments; the loop bound is rounded up to a multiple
    // of 512 so that *all* threads run the same number of iterations — the
    // __syncthreads() calls below must be reached by every thread
    const unsigned int segment_idx_end = (segment_count + 511) & ~511;
    for(unsigned int segment_idx = threadIdx.x; segment_idx < segment_idx_end; segment_idx += 512) {
        // all threads load byte sizes of their segments (rounded up to next multiple of 16 B) into the shared array
        s_segment_offsets[threadIdx.x] = segment_idx < segment_count
                ? (d_segment[segment_idx].data_compressed_size + 15) & ~15
                : 0;

        // first thread runs a sort of serial prefix sum over the segment sizes to get their offsets
        __syncthreads();
        if(0 == threadIdx.x) {
            #pragma unroll 4
            for(int i = 0; i < 512; i++) {
                // exclusive prefix sum: each slot becomes the offset of its segment
                const unsigned int segment_size = s_segment_offsets[i];
                s_segment_offsets[i] = total_byte_count;
                total_byte_count += segment_size;
            }
        }
        __syncthreads();

        // all threads write offsets back into corresponding segment structures
        if(segment_idx < segment_count) {
            d_segment[segment_idx].data_compressed_index = s_segment_offsets[threadIdx.x];
        }
    }

    // first thread finally saves the total sum of bytes needed for compressed data
    if(threadIdx.x == 0) {
        gpujpeg_huffman_output_byte_count = total_byte_count;
    }
}
/**
 * Huffman coder output compaction kernel.
 *
 * Copies each segment's serialized bytes from the temporary buffer into the
 * final contiguous output buffer, one warp per segment. Thread blocks are
 * 32 x N (threadIdx.y is the warp index within the block).
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_compaction_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest
) {
    // get some segment (size of threadblocks is 32 x N, so threadIdx.y is warp index)
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_idx = threadIdx.y + block_idx * blockDim.y;
    if(segment_idx >= segment_count) {
        return;
    }

    // temp variables for all warps
    __shared__ uint4* volatile s_out_ptrs[WARPS_NUM];

    // get info about the segment
    const unsigned int segment_byte_count = (d_segment[segment_idx].data_compressed_size + 15) & ~15; // number of bytes rounded up to multiple of 16
    const unsigned int segment_in_offset = d_segment[segment_idx].data_temp_index; // this should be aligned at least to 16byte boundary

    // first thread of each warp reserves space in output buffer
    // NOTE(review): the other 31 lanes read s_out_ptrs below with no explicit
    // barrier — this relies on implicit warp-synchronous execution (pre-Volta
    // semantics); confirm for GPUs with independent thread scheduling.
    if(0 == threadIdx.x) {
        // Either load precomputed output offset (for CC 1.0) or compute it now (for CCs with atomic operations)
        #if __CUDA_ARCH__ == 100
        const unsigned int segment_out_offset = d_segment[segment_idx].data_compressed_index;
        #else
        const unsigned int segment_out_offset = atomicAdd(&gpujpeg_huffman_output_byte_count, segment_byte_count);
        d_segment[segment_idx].data_compressed_index = segment_out_offset;
        #endif
        s_out_ptrs[threadIdx.y] = (uint4*)(d_dest + segment_out_offset);
    }

    // all threads read output buffer offset for their segment and prepare input and output pointers and number of copy iterations
    const uint4 * d_in = threadIdx.x + (uint4*)(d_src + segment_in_offset);
    uint4 * d_out = threadIdx.x + s_out_ptrs[threadIdx.y];
    unsigned int copy_iterations = segment_byte_count / 512; // 512 is number of bytes copied in each iteration (32 threads * 16 bytes per thread)

    // copy the data!
    while(copy_iterations--) {
        *d_out = *d_in;
        d_out += 32;
        d_in += 32;
    }

    // copy remaining bytes (less than 512 bytes)
    if((threadIdx.x * 16) < (segment_byte_count & 511)) {
        *d_out = *d_in;
    }
}
// Threadblock size for CC 1.x kernel
#define THREAD_BLOCK_SIZE 48
#ifdef GPUJPEG_HUFFMAN_CODER_TABLES_IN_CONSTANT
__constant__
#endif
/** Allocate huffman tables in constant memory */
__device__ struct gpujpeg_table_huffman_encoder gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_TYPE_COUNT][GPUJPEG_HUFFMAN_TYPE_COUNT];
/**
* Write one byte to compressed data (CC 1.x)
*
* @param data_compressed Data compressed
* @param value Byte value to write
* @return void
*/
#define gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, value) { \
*data_compressed = (uint8_t)(value); \
data_compressed++; }
/**
* Write two bytes to compressed data (CC 1.x)
*
* @param data_compressed Data compressed
* @param value Two-byte value to write
* @return void
*/
#define gpujpeg_huffman_gpu_encoder_emit_2byte(data_compressed, value) { \
*data_compressed = (uint8_t)(((value) >> 8) & 0xFF); \
data_compressed++; \
*data_compressed = (uint8_t)((value) & 0xFF); \
data_compressed++; }
/**
* Write marker to compressed data (CC 1.x)
*
* @param data_compressed Data compressed
 * @param marker Marker to write (JPEG_MARKER_...)
* @return void
*/
#define gpujpeg_huffman_gpu_encoder_marker(data_compressed, marker) { \
*data_compressed = 0xFF;\
data_compressed++; \
*data_compressed = (uint8_t)(marker); \
data_compressed++; }
/**
 * Output bits to the stream, applying JPEG byte stuffing. Only the right
 * 24 bits of the accumulator are used; the valid bits are left-justified
 * within them. At most 16 bits can be passed per call, and never more than
 * 7 bits are retained between calls, so 24 bits are sufficient.
 * Version for CC 1.x.
 *
 * @param code             Huffman codeword (value bits right-aligned)
 * @param size             Bit length of the codeword; must be nonzero
 * @param put_value        In/out bit accumulator (left-justified in 24 bits)
 * @param put_bits         In/out count of valid bits in the accumulator
 * @param data_compressed  In/out output pointer, advanced as bytes are emitted
 * @return 0 on success, -1 if size is 0 (invalid Huffman table entry)
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int code, int size, int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // A zero-sized code means the caller used an invalid Huffman table entry
    if ( 0 == size )
        return -1;

    // Keep only 'size' low bits of the code and append them to buffered bits
    int buffer = ((int)code) & (((int)1 << size) - 1);
    int nbits = put_bits + size;
    buffer = (buffer << (24 - nbits)) | put_value;

    // Flush completed bytes, stuffing a zero byte after each emitted 0xFF
    while ( nbits >= 8 ) {
        const unsigned char out_byte = (unsigned char)((buffer >> 16) & 0xFF);
        gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, out_byte);
        if ( 0xFF == out_byte ) {
            gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, 0);
        }
        buffer <<= 8;
        nbits -= 8;
    }

    // Keep the leftover bits for the next call
    put_value = buffer;
    put_bits = nbits;
    return 0;
}
/**
 * Flush bits left in the accumulator by padding the stream with 1-bits,
 * then reset the accumulator state. (CC 1.x)
 *
 * @param put_value        In/out bit accumulator
 * @param put_bits         In/out count of valid accumulator bits
 * @param data_compressed  In/out output byte pointer
 * @return void
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_left_bits(int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // Pad with seven 1-bits; this pushes any buffered bits out as full bytes
    if ( 0 != gpujpeg_huffman_gpu_encoder_emit_bits(0x7F, 7, put_value, put_bits, data_compressed) )
        return;

    // Reset the accumulator for the next segment
    put_value = 0;
    put_bits = 0;
}
/**
 * Encode one 8x8 block (for CC 1.x)
 *
 * One thread per block: each thread works only on its own 64-coefficient
 * slice of the shared staging buffer (indexed by threadIdx.x), so no
 * __syncthreads() is needed here.
 * NOTE(review): the vectorized load below assumes 'data' is aligned for
 * uint64_t access — confirm against the allocator of the quantized data.
 *
 * @param put_value        In/out bit accumulator
 * @param put_bits         In/out count of valid accumulator bits
 * @param dc               In/out last DC coefficient of this component
 * @param data             Pointer to the block's 64 quantized coefficients
 * @param data_compressed  In/out output byte pointer
 * @param d_table_dc       Huffman table for the DC coefficient
 * @param d_table_ac       Huffman table for the AC coefficients
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(int & put_value, int & put_bits, int & dc, int16_t* data, uint8_t* & data_compressed,
                                         struct gpujpeg_table_huffman_encoder* d_table_dc, struct gpujpeg_table_huffman_encoder* d_table_ac)
{
    // Load block to shared memory (wide 8-byte loads, 16 per block)
    typedef uint64_t loading_t;
    const int loading_iteration_count = 64 * 2 / sizeof(loading_t);
    __shared__ int16_t s_data[64 * THREAD_BLOCK_SIZE];
    for ( int i = 0; i < loading_iteration_count; i++ ) {
        ((loading_t*)s_data)[loading_iteration_count * threadIdx.x + i] = ((loading_t*)data)[i];
    }
    int data_start = 64 * threadIdx.x;

    // Encode the DC coefficient difference per section F.1.2.1
    int temp = s_data[data_start + 0] - dc;
    dc = s_data[data_start + 0];

    int temp2 = temp;
    if ( temp < 0 ) {
        // Temp is abs value of input
        temp = -temp;
        // For a negative input, want temp2 = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        temp2--;
    }

    // Find the number of bits needed for the magnitude of the coefficient
    int nbits = 0;
    while ( temp ) {
        nbits++;
        temp >>= 1;
    }

    // Write category number
    if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_dc->code[nbits], d_table_dc->size[nbits], put_value, put_bits, data_compressed) != 0 ) {
        return -1;
    }

    // Write category offset (EmitBits rejects calls with size 0)
    if ( nbits ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
            return -1;
    }

    // Encode the AC coefficients per section F.1.2.2 (r = run length of zeros)
    int r = 0;
    for ( int k = 1; k < 64; k++ )
    {
        // Fetch coefficient k in zigzag order
        temp = s_data[data_start + gpujpeg_huffman_gpu_encoder_order_natural[k]];
        if ( temp == 0 ) {
            r++;
        }
        else {
            // If run length > 15, must emit special run-length-16 codes (0xF0)
            while ( r > 15 ) {
                if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0xF0], d_table_ac->size[0xF0], put_value, put_bits, data_compressed) != 0 )
                    return -1;
                r -= 16;
            }

            temp2 = temp;
            if ( temp < 0 ) {
                // temp is abs value of input
                temp = -temp;
                // This code assumes we are on a two's complement machine
                temp2--;
            }

            // Find the number of bits needed for the magnitude of the coefficient
            // there must be at least one 1 bit
            nbits = 1;
            while ( (temp >>= 1) )
                nbits++;

            // Emit Huffman symbol for run length / number of bits
            int i = (r << 4) + nbits;
            if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[i], d_table_ac->size[i], put_value, put_bits, data_compressed) != 0 )
                return -1;

            // Write Category offset
            if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
                return -1;

            r = 0;
        }
    }

    // If all the left coefs were zero, emit an end-of-block code
    if ( r > 0 ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0], d_table_ac->size[0], put_value, put_bits, data_compressed) != 0 )
            return -1;
    }

    return 0;
}
/**
 * Huffman encoder kernel (for CC 1.x)
 *
 * One thread serially encodes one whole restart segment. The launch grid must
 * be 1-D: the segment index is derived from blockIdx.x only, so blockIdx.y is
 * ignored by this kernel.
 *
 * @param d_component       [in] device array of scan components
 * @param d_segment         [in/out] device array of segments; each segment's
 *                          data_compressed_size is written back on completion
 * @param comp_count        1 in non-interleaved mode, component count otherwise
 * @param segment_count     total number of segments
 * @param d_data_compressed [out] temporary output buffer; each segment writes
 *                          at its own data_temp_index offset
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_encode_kernel(
    struct gpujpeg_component* d_component,
    struct gpujpeg_segment* d_segment,
    int comp_count,
    int segment_count,
    uint8_t* d_data_compressed
)
{
    int segment_index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( segment_index >= segment_count )
        return;

    struct gpujpeg_segment* segment = &d_segment[segment_index];

    // first thread initializes compact output size for next kernel
    if(0 == segment_index) {
        gpujpeg_huffman_output_byte_count = 0;
    }

    // Initialize huffman coder (empty bit buffer at segment start)
    int put_value = 0;
    int put_bits = 0;
    // Last DC coefficient per component (DC values are difference-coded,
    // predictors reset to 0 at each restart segment)
    int dc[GPUJPEG_MAX_COMPONENT_COUNT];
    for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
        dc[comp] = 0;

    // Prepare data pointers
    uint8_t* data_compressed = &d_data_compressed[segment->data_temp_index];
    uint8_t* data_compressed_start = data_compressed;

    // Non-interleaving mode (single component per scan)
    if ( comp_count == 1 ) {
        // note: shadows the outer segment_index with the scan-relative index
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment (one MCU == one 8x8 block here)
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            // Get component for current scan
            struct gpujpeg_component* component = &d_component[segment->scan_index];
            // Get component data for MCU
            int16_t* block = &component->d_data_quantized[(segment_index * component->segment_mcu_count + mcu_index) * component->mcu_size];
            // Get coder parameters
            int & component_dc = dc[segment->scan_index];
            // Get huffman tables (luminance vs. chrominance pair)
            struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
            struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
            if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
            } else {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
            }
            // Encode 8x8 block; stop encoding this segment on failure
            if ( gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac) != 0 )
                break;
        }
    }
    // Interleaving mode (MCU contains blocks of all components)
    else {
        // note: shadows the outer segment_index with the scan-relative index
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            //assert(segment->scan_index == 0);
            for ( int comp = 0; comp < comp_count; comp++ ) {
                struct gpujpeg_component* component = &d_component[comp];

                // Prepare mcu indexes
                int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
                int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
                // Compute base data index
                int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);

                // For all vertical 8x8 blocks (sampling factor may be > 1)
                for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
                    // Compute base row data index
                    int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                    // For all horizontal 8x8 blocks
                    for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
                        // Compute 8x8 block data index
                        int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
                        // Get component data for MCU
                        int16_t* block = &component->d_data_quantized[data_index];
                        // Get coder parameters
                        int & component_dc = dc[comp];
                        // Get huffman tables (luminance vs. chrominance pair)
                        struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
                        struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
                        if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
                        } else {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
                        }
                        // Encode 8x8 block (return value ignored here - note that
                        // the non-interleaved branch above stops on failure)
                        gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac);
                    }
                }
            }
        }
    }

    // Emit left bits (flush the partially filled output byte)
    if ( put_bits > 0 )
        gpujpeg_huffman_gpu_encoder_emit_left_bits(put_value, put_bits, data_compressed);

    // Output restart marker (RST0..RST7 cycling per segment, per JPEG spec)
    int restart_marker = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
    gpujpeg_huffman_gpu_encoder_marker(data_compressed, restart_marker);

    // Set compressed size
    segment->data_compressed_size = data_compressed - data_compressed_start;
}
/**
 * Adds packed coefficients into the GPU version of Huffman lookup table.
 *
 * Each GPU entry keeps the codeword bits left-aligned in the MSBs and the
 * codeword bit-size in the LSBs, as consumed by the warp encoding kernel.
 *
 * @param dest   destination GPU-format table (257 entries)
 * @param src    source CPU Huffman table (code/size pairs, 256 symbols)
 * @param is_ac  true for AC tables - entry 0 is reserved for special purposes
 */
void
gpujpeg_huffman_gpu_add_packed_table(uint32_t * const dest, const struct gpujpeg_table_huffman_encoder * const src, const bool is_ac) {
    // make a upshifted copy of the table for GPU encoding
    // (index 256 is an extra entry which wraps around to symbol 0)
    for ( int i = 0; i <= 256; i++ ) {
        const int size = src->size[i & 0xFF];
        // Unused symbols have size 0; store a plain 0 for them instead of
        // evaluating "code << (32 - 0)" - shifting a 32bit value by 32 bits
        // is undefined behavior in C++ and could leave garbage in the table.
        dest[i] = size ? ((src->code[i & 0xFF] << (32 - size)) | (uint32_t)size) : 0;
    }

    // reserve first index in GPU version of AC table for special purposes
    if ( is_ac ) {
        dest[0] = 0;
    }
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_encoder_init(const struct gpujpeg_encoder * encoder)
{
    // Fill the value-decomposition lookup table in GPU memory
    // (32 blocks x 256 threads = 8192 entries, one per value -4096..4095)
    hipFuncSetCacheConfig(gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel, hipFuncCachePreferShared);
    hipLaunchKernelGGL(( gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel), dim3(32), dim3(256), 0, 0, ); // 8192 threads total
    hipDeviceSynchronize();
    gpujpeg_cuda_check_error("Decomposition LUT initialization failed");

    // Compose GPU version of the huffman LUT on the host (for CC >= 2.0).
    // Layout is four consecutive 257-entry tables, in this order:
    // Y/AC, Y/DC, chroma/AC, chroma/DC.
    uint32_t host_lut[(256 + 1) * 4];
    const struct gpujpeg_table_huffman_encoder * table_sources[4] = {
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC],
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC],
        &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC]
    };
    for ( int table_idx = 0; table_idx < 4; table_idx++ ) {
        // AC tables sit at even positions in the layout above
        const bool is_ac_table = 0 == (table_idx & 1);
        gpujpeg_huffman_gpu_add_packed_table(host_lut + 257 * table_idx, table_sources[table_idx], is_ac_table);
    }
    // ... and upload the composed LUT into GPU memory
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_lut,
        host_lut,
        (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_lut),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman LUT copy)");

    // Upload the original (unpacked) Huffman coding tables as well (for CC 1.x)
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_table_huffman,
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        sizeof(gpujpeg_huffman_gpu_encoder_table_huffman),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman coding table)");

    // Upload the zigzag natural-order permutation into constant device memory
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (natural order copy)");

    // Prefer larger shared memory over larger L1 cache for all encoder kernels
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<true>, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<false>, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_serialization_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_compaction_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_allocation_kernel, hipFuncCachePreferShared);

    return 0;
}
/**
 * Get grid size for specified count of threadblocks. (Grid size is limited
 * to 65536 in both directions, so if we need more threadblocks, we must use
 * both x and y coordinates.)
 *
 * Kernels launched with such a grid must reconstruct the flat block index as
 * blockIdx.x + blockIdx.y * gridDim.x and bound-check it, because rounding
 * up may create a few extra threadblocks.
 */
dim3
gpujpeg_huffman_gpu_encoder_grid_size(int tblock_count)
{
    // Start with a flat 1-D grid, then keep halving x (rounding up)
    // and doubling y until x fits under the per-dimension limit.
    dim3 grid(tblock_count);
    for ( ; grid.x > 0xffff; grid.y *= 2 ) {
        grid.x = (grid.x + 1) / 2;
    }
    return grid;
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_encoder_encode(struct gpujpeg_encoder* encoder, unsigned int * output_byte_count)
{
    // Get coder
    struct gpujpeg_coder* coder = &encoder->coder;

    assert(coder->param.restart_interval > 0);

    // Select encoder kernel which either expects continuos segments of blocks or uses block lists
    int comp_count = 1;
    if ( coder->param.interleaved == 1 )
        comp_count = coder->param_image.comp_count;
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);

    // Select encoder kernel based on compute capability
    if ( encoder->coder.cuda_cc_major < 2 ) {
        // Run kernel. The grid is deliberately kept 1-D here: the CC 1.x
        // kernel derives the segment index from blockIdx.x only.
        dim3 thread(THREAD_BLOCK_SIZE);
        dim3 grid(gpujpeg_div_and_round_up(coder->segment_count, thread.x));
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel), dim3(grid), dim3(thread), 0, 0,
            coder->d_component,
            coder->d_segment,
            comp_count,
            coder->segment_count,
            coder->d_temp_huffman
        );
        hipDeviceSynchronize();
        gpujpeg_cuda_check_error("Huffman encoding failed");
    } else {
        // Run encoder kernel (one warp per segment)
        dim3 thread(32 * WARPS_NUM);
        dim3 grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, (thread.x / 32)));
        if(comp_count == 1) {
            hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel_warp<true>), dim3(grid), dim3(thread), 0, 0,
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count
            );
        } else {
            hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel_warp<false>), dim3(grid), dim3(thread), 0, 0,
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count
            );
        }
        hipDeviceSynchronize();
        gpujpeg_cuda_check_error("Huffman encoding failed");

        // Run codeword serialization kernel (one thread per segment).
        // Fix: launch with the possibly 2-D serialization_grid - launching with
        // the raw threadblock count fails once it exceeds the 65535
        // per-dimension grid limit; the kernel already decodes 2-D indices
        // (blockIdx.x + blockIdx.y * gridDim.x), so the 2-D grid is expected.
        const int num_serialization_tblocks = gpujpeg_div_and_round_up(coder->segment_count, SERIALIZATION_THREADS_PER_TBLOCK);
        const dim3 serialization_grid = gpujpeg_huffman_gpu_encoder_grid_size(num_serialization_tblocks);
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_serialization_kernel), dim3(serialization_grid), dim3(SERIALIZATION_THREADS_PER_TBLOCK), 0, 0,
            coder->d_segment,
            coder->segment_count,
            coder->d_data_compressed,
            coder->d_temp_huffman
        );
        hipDeviceSynchronize();
        gpujpeg_cuda_check_error("Codeword serialization failed");
    }

    // No atomic operations in CC 1.0 => run output size computation kernel to allocate the output buffer space
    if ( encoder->coder.cuda_cc_major == 1 && encoder->coder.cuda_cc_minor == 0 ) {
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_allocation_kernel), dim3(1), dim3(512), 0, 0, coder->d_segment, coder->segment_count);
        hipDeviceSynchronize();
        gpujpeg_cuda_check_error("Huffman encoder output allocation failed");
    }

    // Run output compaction kernel (one warp per segment)
    const dim3 compaction_thread(32, WARPS_NUM);
    const dim3 compaction_grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, WARPS_NUM));
    hipLaunchKernelGGL(( gpujpeg_huffman_encoder_compaction_kernel), dim3(compaction_grid), dim3(compaction_thread), 0, 0,
        coder->d_segment,
        coder->segment_count,
        coder->d_temp_huffman,
        coder->d_data_compressed
    );
    hipDeviceSynchronize();
    gpujpeg_cuda_check_error("Huffman output compaction failed");

    // Read and return number of occupied bytes
    hipMemcpyFromSymbol(output_byte_count, gpujpeg_huffman_output_byte_count, sizeof(unsigned int), 0, hipMemcpyDeviceToHost);
    gpujpeg_cuda_check_error("Huffman output size getting failed");

    // indicate success
    return 0;
}
| 2aabfcdc5277f911b6cfcc5d8c43854957b827a8.cu | /**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_encoder.h"
#include <libgpujpeg/gpujpeg_util.h>
#define WARPS_NUM 8
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_encoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
/** Size of occupied part of output buffer */
__device__ unsigned int gpujpeg_huffman_output_byte_count;
/**
* Huffman coding tables in constant memory - each has 257 items (256 + 1 extra)
* There are are 4 of them - one after another, in following order:
* - luminance (Y) AC
* - luminance (Y) DC
* - chroma (cb/cr) AC
* - chroma (cb/cr) DC
*/
__device__ uint32_t gpujpeg_huffman_gpu_lut[(256 + 1) * 4];
/**
* Value decomposition in constant memory (input range from -4096 to 4095 ... both inclusive)
* Mapping from coefficient value into the code for the value ind its bit size.
*/
__device__ unsigned int gpujpeg_huffman_value_decomposition[8 * 1024];
/**
 * Initializes coefficient decomposition table in global memory. (CC >= 2.0)
 * Output table is a mapping from some value into its code and bit size.
 *
 * The launch must cover exactly 8192 threads, one per coefficient value
 * in range -4096..4095 (tid 0 maps to -4096).
 */
__global__ static void
gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel() {
    // fetch some value
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int value = tid - 4096;

    // decompose it
    unsigned int value_code = value;
    int absolute = value;
    if ( value < 0 ) {
        // value is now absolute value of input
        absolute = -absolute;
        // For a negative input, want value_code = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        value_code--;
    }

    // Find the number of bits needed for the magnitude of the coefficient
    unsigned int value_nbits = 0;
    while ( absolute ) {
        value_nbits++;
        absolute >>= 1;
    }

    // Save result packed into unsigned int (value bits are left aligned in MSBs
    // and size is right aligned in LSBs). Value 0 decomposes into 0 bits - store
    // a plain 0 for it instead of evaluating "value_code << 32", which would be
    // an undefined shift by the full operand width.
    gpujpeg_huffman_value_decomposition[tid] = value_nbits
        ? (value_nbits | (value_code << (32 - value_nbits)))
        : 0;
}
#if __CUDA_ARCH__ >= 200
/**
 * Adds up to 32 bits at once into output buffer, applying JPEG byte stuffing:
 * a zero byte is written after each emitted 0xFF byte. (CC >= 2.0)
 *
 * Codeword value must be aligned to left (most significant bits) with its bit
 * count packed in the 5 LSBs, as produced by the encoding LUTs.
 *
 * @param remaining_bits   left-aligned bit accumulator (bits not yet written)
 * @param byte_count       count of bytes already written into out_ptr (updated)
 * @param bit_count        count of valid bits in remaining_bits; stays below 8
 *                         on return (assuming it was below 8 on entry)
 * @param out_ptr          output byte buffer
 * @param packed_code_word codeword bits in 27 MSBs, bit size in 5 LSBs
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int & remaining_bits, int & byte_count, int & bit_count, uint8_t * const out_ptr, const unsigned int packed_code_word)
{
    // decompose packed codeword into the msb-aligned value and bit-length of the value
    const unsigned int code_word = packed_code_word & ~31;
    const unsigned int code_bit_size = packed_code_word & 31;

    // concatenate with remaining bits
    remaining_bits |= code_word >> bit_count;
    bit_count += code_bit_size;

    // flush some bytes if have more than 8 bits
    if (bit_count >= 8) {
        do {
            // emit the topmost byte of the accumulator
            const unsigned int out_byte = remaining_bits >> 24;
            out_ptr[byte_count++] = out_byte;

            if(0xff == out_byte) {
                // keep zero byte after each 0xFF (buffer is expected to be zeroed)
                out_ptr[byte_count++] = 0;
            }

            remaining_bits <<= 8;
            bit_count -= 8;
        } while (bit_count >= 8);

        // keep only remaining bits in the buffer - recomputed from the original
        // codeword, because codeword bits may have been shifted out above when
        // bit_count initially exceeded the accumulator capacity boundary
        remaining_bits = code_word << (code_bit_size - bit_count);
        remaining_bits &= 0xfffffffe << (31 - bit_count);
    }
}
/**
 * Given some huffman table offset, RLE zero count and coefficient value,
 * this returns huffman codeword for the value (packed in 27 MSBs)
 * together with its bit size (in 5 LSBs). (CC >= 2.0)
 */
__device__ static unsigned int
gpujpeg_huffman_gpu_encode_value(const int preceding_zero_count, const int coefficient,
                const int huffman_lut_offset)
{
    // Precomputed decomposition of the coefficient: magnitude bits are
    // left-aligned in the MSBs, their bit count sits in the 4 LSBs.
    const unsigned int decomposition = gpujpeg_huffman_value_decomposition[4096 + coefficient];
    const int magnitude_nbits = decomposition & 0xf;
    const unsigned int magnitude_bits = decomposition & ~0xf;

    // The huffman symbol index combines the run length of preceding zeros
    // with the magnitude category of the value.
    const int symbol_idx = huffman_lut_offset + preceding_zero_count * 16 + magnitude_nbits;
    const unsigned int packed_prefix = gpujpeg_huffman_gpu_lut[symbol_idx];
    const unsigned int prefix_nbits = packed_prefix & 31;

    // Append magnitude bits right after the prefix bits and account for them
    // in the size field (5 LSBs) by simple addition.
    return (packed_prefix + magnitude_nbits) | (magnitude_bits >> prefix_nbits);
}
/**
 * Flush remaining codewords from buffer in shared memory to global memory
 * output buffer. (CC >= 2.0)
 *
 * Must be executed by all 32 threads of the warp; data_compressed is expected
 * to be pre-offset by tid * 4 by the caller (see the warp encoding kernel).
 *
 * @param s_out               warp's shared-memory codeword buffer
 * @param data_compressed     per-thread output pointer (advanced on flush)
 * @param remaining_codewords count of codewords in s_out (reset to 0)
 * @param tid                 lane index within the warp (0..31)
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_flush_codewords(unsigned int * const s_out, unsigned int * &data_compressed, int & remaining_codewords, const int tid) {
    // this works for up to 4 * 32 remaining codewords
    if(remaining_codewords) {
        // pad remaining codewords with extra zero-sized codewords, not to have to use special case in serialization kernel, which saves 4 codewords at once
        s_out[remaining_codewords + tid] = 0;

        // save all remaining codewords at once (together with some zero sized padding codewords)
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];

        // update codeword counter
        data_compressed += remaining_codewords;
        remaining_codewords = 0;
    }
}
/**
 * Encode one 8x8 block (CC >= 2.0)
 *
 * Executed cooperatively by all 32 threads of a warp: each thread handles one
 * pair of adjacent coefficients (in zigzag order) and warp votes are used to
 * compute zero run-lengths and output positions.
 * NOTE(review): uses legacy mask-less __ballot, so it relies on implicit warp
 * synchrony - valid for the pre-Volta architectures this path targets;
 * confirm before building for CC >= 7.0.
 *
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(const int16_t * block, unsigned int * &data_compressed, unsigned int * const s_out,
                int & remaining_codewords, const int last_dc_idx, int tid, const int huffman_lut_offset)
{
    // each thread loads a pair of values (pair after zigzag reordering)
    const int load_idx = tid * 2;
    int in_even = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx]];
    const int in_odd = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx + 1]];

    // compute preceding zero count for even coefficient (actually compute the count multiplied by 16)
    const unsigned int nonzero_mask = (1 << tid) - 1;
    const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC is always treated as nonzero
    const unsigned int nonzero_bitmap_1 = __ballot(in_odd);
    const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1;
    const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask);
    int zeros_before_even = 2 * (zero_pair_count + tid - 32);
    // one more zero if the closest preceding nonzero value is an even one
    if((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) {
        zeros_before_even += 1;
    }

    // true if any nonzero pixel follows thread's odd pixel
    const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask;

    // count of consecutive zeros before odd value (either one more than
    // even if even is zero or none if even value itself is nonzero)
    // (the count is actually multiplied by 16)
    int zeros_before_odd = in_even || !tid ? 0 : zeros_before_even + 1;

    // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited)
    // otherwise only trim extra bits from the counts of following zeros
    const int zero_count_mask = nonzero_follows ? 0xF : 0;
    zeros_before_even &= zero_count_mask;
    zeros_before_odd &= zero_count_mask;

    // pointer to LUT for encoding thread's even value
    // (only thread #0 uses DC table, others use AC table)
    int even_lut_offset = huffman_lut_offset;

    // first thread handles special DC coefficient
    if(0 == tid) {
        // first thread uses DC part of the table for its even value
        even_lut_offset += 256 + 1;

        // update last DC coefficient (saved at the special place at the end of the shared bufer)
        // - DC is encoded as difference from the previous block's DC
        const int original_in_even = in_even;
        in_even -= ((int*)s_out)[last_dc_idx];
        ((int*)s_out)[last_dc_idx] = original_in_even;
    }

    // last thread handles special block-termination symbol
    if(0 == ((tid ^ 31) | in_odd)) {
        // this causes selection of huffman symbol at index 256 (which contains the termination symbol)
        zeros_before_odd = 16;
    }

    // each thread gets codeword for its two pixels
    unsigned int even_code = gpujpeg_huffman_gpu_encode_value(zeros_before_even, in_even, even_lut_offset);
    unsigned int odd_code = gpujpeg_huffman_gpu_encode_value(zeros_before_odd, in_odd, huffman_lut_offset);

    // concatenate both codewords into one if they are short enough
    const unsigned int even_code_size = even_code & 31;
    const unsigned int odd_code_size = odd_code & 31;
    const unsigned int total_size = even_code_size + odd_code_size;
    if(total_size <= 27) {
        even_code = total_size | ((odd_code & ~31) >> even_code_size) | (even_code & ~31);
        odd_code = 0;
    }

    // each thread get number of preceding nonzero codewords and total number of nonzero codewords in this block
    const unsigned int even_codeword_presence = __ballot(even_code);
    const unsigned int odd_codeword_presence = __ballot(odd_code);
    const int codeword_offset = __popc(nonzero_mask & even_codeword_presence)
                + __popc(nonzero_mask & odd_codeword_presence);

    // each thread saves its values into temporary shared buffer
    if(even_code) {
        s_out[remaining_codewords + codeword_offset] = even_code;
        if(odd_code) {
            s_out[remaining_codewords + codeword_offset + 1] = odd_code;
        }
    }

    // advance count of codewords in shared memory buffer
    remaining_codewords += __popc(odd_codeword_presence) + __popc(even_codeword_presence);

    // flush some codewords to global memory if there are too many of them in shared buffer
    const int flush_count = 32 * 4; // = half of the buffer
    if(remaining_codewords > flush_count) {
        // move first half of the buffer into output buffer in global memory and update output pointer
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        data_compressed += flush_count;

        // shift remaining codewords to begin of the buffer and update their count
        ((uint4*)s_out)[tid] = ((uint4*)s_out)[flush_count / 4 + tid]; // 4 for 4 uints in uint4
        remaining_codewords -= flush_count;
    }

    // nothing to fail here
    return 0;
}
#endif // #if __CUDA_ARCH__ >= 200
/**
 * Huffman encoder kernel (For compute capability >= 2.0)
 *
 * One warp cooperatively encodes one segment; each threadblock processes
 * WARPS_NUM segments. Supports the 2-D grid produced by
 * gpujpeg_huffman_gpu_encoder_grid_size (block index decoded from both
 * blockIdx.x and blockIdx.y). Output is a stream of packed codewords to be
 * serialized into bytes by a subsequent kernel.
 *
 * @tparam CONTINUOUS_BLOCK_LIST true for single-component scans, where blocks
 *         of a segment are contiguous in memory; false to use d_block_list
 *
 * @return void
 */
template <bool CONTINUOUS_BLOCK_LIST>
#if __CUDA_ARCH__ >= 200
__launch_bounds__(WARPS_NUM * 32, 1024 / (WARPS_NUM * 32))
#endif
__global__ static void
gpujpeg_huffman_encoder_encode_kernel_warp(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    uint8_t* d_data_compressed,
    const uint64_t* const d_block_list,
    int16_t* const d_data_quantized,
    struct gpujpeg_component* const d_component,
    const int comp_count
) {
#if __CUDA_ARCH__ >= 200
    int warpidx = threadIdx.x >> 5;
    int tid = threadIdx.x & 31;

    // Per-warp codeword buffer (64 uint4 = 256 codewords) + 1 extra uint4
    // whose uints hold the per-component last-DC predictors.
    __shared__ uint4 s_out_all[(64 + 1) * WARPS_NUM];
    unsigned int * s_out = (unsigned int*)(s_out_all + warpidx * (64 + 1));

    // Number of remaining codewords in shared buffer
    int remaining_codewords = 0;

    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_index = block_idx * WARPS_NUM + warpidx;

    // first thread initializes compact output size for next kernel
    if(0 == tid && 0 == warpidx && 0 == block_idx) {
        gpujpeg_huffman_output_byte_count = 0;
    }

    // stop if out of segment bounds
    if ( segment_index >= segment_count )
        return;

    struct gpujpeg_segment* segment = &d_segment[segment_index];

    // Initialize last DC coefficients (predictors reset at segment start)
    if(tid < 3) {
        s_out[256 + tid] = 0;
    }

    // Prepare data pointers
    unsigned int * data_compressed = (unsigned int*)(d_data_compressed + segment->data_temp_index);
    unsigned int * data_compressed_start = data_compressed;

    // Pre-add thread ID to output pointer (it's always used only with it)
    data_compressed += (tid * 4);

    // Encode all blocks in segment
    if(CONTINUOUS_BLOCK_LIST) {
        // Get component for current scan
        const struct gpujpeg_component* component = &d_component[segment->scan_index];

        // mcu size of the component
        const int comp_mcu_size = component->mcu_size;

        // Get component data for MCU (first block)
        const int16_t* block = component->d_data_quantized + (segment->scan_segment_index * component->segment_mcu_count) * comp_mcu_size;

        // Get huffman table offset
        const int huffman_table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0 : (256 + 1) * 2; // possibly skips luminance tables

        // Encode MCUs in segment
        for (int block_count = segment->mcu_count; block_count--;) {
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, 256, tid, huffman_table_offset);

            // Advance to next block
            block += comp_mcu_size;
        }
    } else {
        // Pointer to segment's list of 8x8 blocks and their count
        const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;

        // Encode all blocks
        for(int block_count = segment->block_count; block_count--;) {
            // Get pointer to next block input data and info about its color type
            const uint64_t packed_block_info = *(packed_block_info_ptr++);

            // Get coder parameters (low 7 bits select the DC predictor slot)
            const int last_dc_idx = 256 + (packed_block_info & 0x7f);

            // Get offset to right part of huffman table (bit 7 flags chrominance)
            const int huffman_table_offset = packed_block_info & 0x80 ? (256 + 1) * 2 : 0; // possibly skips luminance tables

            // Source data pointer (remaining bits are the coefficient offset)
            int16_t* block = &d_data_quantized[packed_block_info >> 8];

            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, last_dc_idx, tid, huffman_table_offset);
        }
    }

    // flush remaining codewords
    gpujpeg_huffman_gpu_encoder_flush_codewords(s_out, data_compressed, remaining_codewords, tid);

    // Set number of codewords.
    if (tid == 0 ) {
        segment->data_compressed_size = data_compressed - data_compressed_start;
    }
#endif // #if __CUDA_ARCH__ >= 200
}
#define SERIALIZATION_THREADS_PER_TBLOCK 192
/**
 * Codeword serialization kernel (CC >= 2.0).
 *
 * One thread serializes one segment: it reads the packed codewords emitted by
 * the warp encoding kernel and turns them into the final JPEG byte stream
 * (with byte stuffing), terminated by a restart marker. Supports the 2-D grid
 * produced by gpujpeg_huffman_gpu_encoder_grid_size.
 *
 * @return void
 */
#if __CUDA_ARCH__ >= 200
__launch_bounds__(SERIALIZATION_THREADS_PER_TBLOCK, 1536 / SERIALIZATION_THREADS_PER_TBLOCK)
#endif
__global__ static void
gpujpeg_huffman_encoder_serialization_kernel(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest
) {
#if __CUDA_ARCH__ >= 200
    // Temp buffer for all threads of the threadblock
    __shared__ uint4 s_temp_all[2 * SERIALIZATION_THREADS_PER_TBLOCK];

    // Thread's 32 bytes in shared memory for output composition
    uint4 * const s_temp = s_temp_all + threadIdx.x * 2;

    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    int segment_index = block_idx * SERIALIZATION_THREADS_PER_TBLOCK + threadIdx.x;
    if ( segment_index >= segment_count )
        return;

    // Thread's segment
    struct gpujpeg_segment* const segment = &d_segment[segment_index];

    // Input and output pointers (in-place: output overwrites the codeword
    // stream at the same temp offset, output bytes never outrun the input)
    const int data_offset = segment->data_temp_index;
    uint4 * const d_dest_stream_start = (uint4*)(d_dest + data_offset);
    uint4 * d_dest_stream = d_dest_stream_start;
    const uint4 * d_src_codewords = (uint4*)(d_src + data_offset);

    // number of bytes in the temp buffer, remaining bits and their count
    int byte_count = 0, bit_count = 0;
    unsigned int remaining_bits = 0;

    // "data_compressed_size" is now initialized to number of codewords to be serialized
    for(int cword_tuple_count = (segment->data_compressed_size + 3) >> 2; cword_tuple_count--; ) // reading 4 codewords at once
    {
        // read 4 codewords and advance input pointer to next ones
        const uint4 cwords = *(d_src_codewords++);

        // encode first pair of codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.x);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.y);

        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];

            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];

            // update number of buffered bytes
            byte_count -= 16;
        }

        // encode other two codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.z);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.w);

        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];

            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];

            // update number of buffered bytes
            byte_count -= 16;
        }
    }

    // Emit left bits (7 one-bits pad the last byte, per JPEG convention)
    gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, 0xfe000007);

    // Terminate codestream with restart marker (RST0..RST7 cycling)
    ((uint8_t*)s_temp)[byte_count + 0] = 0xFF;
    ((uint8_t*)s_temp)[byte_count + 1] = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);

    // flush remaining bytes
    d_dest_stream[0] = s_temp[0];
    d_dest_stream[1] = s_temp[1];

    // Set compressed size (now in bytes, including the 2 marker bytes)
    segment->data_compressed_size = (d_dest_stream - d_dest_stream_start) * 16 + byte_count + 2;
#endif // #if __CUDA_ARCH__ >= 200
}
/**
 * Huffman coder compact output allocation kernel - serially reserves
 * some space for compressed output of segments in output buffer.
 * (For CC 1.0 - a workaround for missing atomic operations.)
 *
 * Only single threadblock with 512 threads is launched. Each segment's
 * size is rounded up to a multiple of 16 B, its exclusive-prefix-sum offset
 * is stored into data_compressed_index, and the grand total is published in
 * gpujpeg_huffman_output_byte_count.
 */
__global__ static void
gpujpeg_huffman_encoder_allocation_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count
) {
    // offsets of segments
    __shared__ unsigned int s_segment_offsets[512];

    // cumulative sum of bytes of all segments
    unsigned int total_byte_count = 0;

    // iterate over all segments, 512 at a time (count rounded up to 512)
    const unsigned int segment_idx_end = (segment_count + 511) & ~511;
    for(unsigned int segment_idx = threadIdx.x; segment_idx < segment_idx_end; segment_idx += 512) {
        // all threads load byte sizes of their segments (rounded up to next multiple of 16 B) into the shared array
        s_segment_offsets[threadIdx.x] = segment_idx < segment_count
            ? (d_segment[segment_idx].data_compressed_size + 15) & ~15
            : 0;

        // barrier: all sizes must be written before thread 0 reads them
        __syncthreads();

        // first thread runs a sort of serial prefix sum over the segment sizes to get their offsets
        if(0 == threadIdx.x) {
            #pragma unroll 4
            for(int i = 0; i < 512; i++) {
                const unsigned int segment_size = s_segment_offsets[i];
                s_segment_offsets[i] = total_byte_count;
                total_byte_count += segment_size;
            }
        }

        // barrier: offsets must be complete before threads read them back
        __syncthreads();

        // all threads write offsets back into corresponding segment structures
        if(segment_idx < segment_count) {
            d_segment[segment_idx].data_compressed_index = s_segment_offsets[threadIdx.x];
        }
    }

    // first thread finally saves the total sum of bytes needed for compressed data
    if(threadIdx.x == 0) {
        gpujpeg_huffman_output_byte_count = total_byte_count;
    }
}
/**
 * Huffman coder output compaction kernel.
 *
 * One warp copies one segment's byte stream from its scattered temp location
 * into the final contiguous output buffer (threadblocks are 32 x WARPS_NUM,
 * threadIdx.y is the warp index). Copies whole 16 B uint4 chunks - segment
 * sizes are rounded up accordingly, matching the allocation kernel.
 * NOTE(review): lanes read s_out_ptrs written by lane 0 without an explicit
 * __syncwarp, relying on implicit warp synchrony (volatile only prevents
 * caching) - fine for pre-Volta targets; confirm for CC >= 7.0.
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_compaction_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest
) {
    // get some segment (size of threadblocks is 32 x N, so threadIdx.y is warp index)
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_idx = threadIdx.y + block_idx * blockDim.y;
    if(segment_idx >= segment_count) {
        return;
    }

    // temp variables for all warps
    __shared__ uint4* volatile s_out_ptrs[WARPS_NUM];

    // get info about the segment
    const unsigned int segment_byte_count = (d_segment[segment_idx].data_compressed_size + 15) & ~15; // number of bytes rounded up to multiple of 16
    const unsigned int segment_in_offset = d_segment[segment_idx].data_temp_index; // this should be aligned at least to 16byte boundary

    // first thread of each warp reserves space in output buffer
    if(0 == threadIdx.x) {
        // Either load precomputed output offset (for CC 1.0) or compute it now (for CCs with atomic operations)
        #if __CUDA_ARCH__ == 100
        const unsigned int segment_out_offset = d_segment[segment_idx].data_compressed_index;
        #else
        const unsigned int segment_out_offset = atomicAdd(&gpujpeg_huffman_output_byte_count, segment_byte_count);
        d_segment[segment_idx].data_compressed_index = segment_out_offset;
        #endif
        s_out_ptrs[threadIdx.y] = (uint4*)(d_dest + segment_out_offset);
    }

    // all threads read output buffer offset for their segment and prepare input and output pointers and number of copy iterations
    const uint4 * d_in = threadIdx.x + (uint4*)(d_src + segment_in_offset);
    uint4 * d_out = threadIdx.x + s_out_ptrs[threadIdx.y];
    unsigned int copy_iterations = segment_byte_count / 512; // 512 is number of bytes copied in each iteration (32 threads * 16 bytes per thread)

    // copy the data!
    while(copy_iterations--) {
        *d_out = *d_in;
        d_out += 32;
        d_in += 32;
    }

    // copy remaining bytes (less than 512 bytes)
    if((threadIdx.x * 16) < (segment_byte_count & 511)) {
        *d_out = *d_in;
    }
}
// Threadblock size for CC 1.x kernel
#define THREAD_BLOCK_SIZE 48
#ifdef GPUJPEG_HUFFMAN_CODER_TABLES_IN_CONSTANT
__constant__
#endif
/** Allocate huffman tables in constant memory */
__device__ struct gpujpeg_table_huffman_encoder gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_TYPE_COUNT][GPUJPEG_HUFFMAN_TYPE_COUNT];
/**
* Write one byte to compressed data (CC 1.x)
*
* @param data_compressed Data compressed
* @param value Byte value to write
* @return void
*/
#define gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, value) { \
*data_compressed = (uint8_t)(value); \
data_compressed++; }
/**
* Write two bytes to compressed data (CC 1.x)
*
* @param data_compressed Data compressed
* @param value Two-byte value to write
* @return void
*/
#define gpujpeg_huffman_gpu_encoder_emit_2byte(data_compressed, value) { \
*data_compressed = (uint8_t)(((value) >> 8) & 0xFF); \
data_compressed++; \
*data_compressed = (uint8_t)((value) & 0xFF); \
data_compressed++; }
/**
* Write marker to compressed data (CC 1.x)
*
* @param data_compressed Data compressed
* @param marker Marker to write (JPEG_MARKER_...)
* @return void
*/
#define gpujpeg_huffman_gpu_encoder_marker(data_compressed, marker) { \
*data_compressed = 0xFF;\
data_compressed++; \
*data_compressed = (uint8_t)(marker); \
data_compressed++; }
/**
* Output bits to the file. Only the right 24 bits of put_buffer are used;
* the valid bits are left-justified in this part. At most 16 bits can be
* passed to EmitBits in one call, and we never retain more than 7 bits
* in put_buffer between calls, so 24 bits are sufficient. Version for CC 1.x
*
* @param coder Huffman coder structure
* @param code Huffman code
* @param size Size in bits of the Huffman code
* @return void
*/
__device__ static int
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int code, int size, int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // A zero-length code means the caller used an invalid Huffman table entry.
    if ( 0 == size )
        return -1;
    // Keep only the low 'size' bits of the code and append them to the bits
    // already buffered, left-justified within the 24-bit working window
    // (at most 7 bits are retained between calls, so 24 bits suffice).
    int buffer = ((int)code) & ((1 << size) - 1);
    int bits = put_bits + size;
    buffer = (buffer << (24 - bits)) | put_value;
    // Flush every complete byte (bits 16..23 of the window); a 0x00 byte is
    // stuffed after each 0xFF as required by the JPEG bitstream format.
    for ( ; bits >= 8; bits -= 8, buffer <<= 8 ) {
        const unsigned char byte = (unsigned char)((buffer >> 16) & 0xFF);
        gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, byte);
        if ( 0xFF == byte ) {
            gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, 0);
        }
    }
    // Keep the remaining (< 8) bits buffered for the next call.
    put_value = buffer;
    put_bits = bits;
    return 0;
}
/**
* Emit left bits (CC 1.x)
*
* @param coder Huffman coder structure
* @return void
*/
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_left_bits(int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // Pad the bit buffer with seven 1-bits (at most 7 bits are ever buffered),
    // which forces the final partial byte out, then reset the buffer state.
    const int status = gpujpeg_huffman_gpu_encoder_emit_bits(0x7F, 7, put_value, put_bits, data_compressed);
    if ( status != 0 )
        return;
    put_value = 0;
    put_bits = 0;
}
/**
* Encode one 8x8 block (for CC 1.x)
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(int & put_value, int & put_bits, int & dc, int16_t* data, uint8_t* & data_compressed,
                struct gpujpeg_table_huffman_encoder* d_table_dc, struct gpujpeg_table_huffman_encoder* d_table_ac)
{
    // Stage the 64 int16 coefficients into shared memory using wide 64-bit
    // loads; each thread owns a disjoint 64-coefficient slice, so no
    // synchronization between threads is needed.
    typedef uint64_t loading_t;
    const int loading_iteration_count = 64 * 2 / sizeof(loading_t);
    // Load block to shared memory
    __shared__ int16_t s_data[64 * THREAD_BLOCK_SIZE];
    for ( int i = 0; i < loading_iteration_count; i++ ) {
        ((loading_t*)s_data)[loading_iteration_count * threadIdx.x + i] = ((loading_t*)data)[i];
    }
    int data_start = 64 * threadIdx.x;
    // Encode the DC coefficient difference per section F.1.2.1
    // (dc carries the previous block's DC value as the predictor)
    int temp = s_data[data_start + 0] - dc;
    dc = s_data[data_start + 0];
    int temp2 = temp;
    if ( temp < 0 ) {
        // Temp is abs value of input
        temp = -temp;
        // For a negative input, want temp2 = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        temp2--;
    }
    // Find the number of bits needed for the magnitude of the coefficient
    int nbits = 0;
    while ( temp ) {
        nbits++;
        temp >>= 1;
    }
    // Write category number
    if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_dc->code[nbits], d_table_dc->size[nbits], put_value, put_bits, data_compressed) != 0 ) {
        return -1;
    }
    // Write category offset (EmitBits rejects calls with size 0)
    if ( nbits ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
            return -1;
    }
    // Encode the AC coefficients per section F.1.2.2 (r = run length of zeros);
    // coefficients are visited in zig-zag order via the natural-order table
    int r = 0;
    for ( int k = 1; k < 64; k++ )
    {
        temp = s_data[data_start + gpujpeg_huffman_gpu_encoder_order_natural[k]];
        if ( temp == 0 ) {
            r++;
        }
        else {
            // If run length > 15, must emit special run-length-16 codes (0xF0)
            while ( r > 15 ) {
                if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0xF0], d_table_ac->size[0xF0], put_value, put_bits, data_compressed) != 0 )
                    return -1;
                r -= 16;
            }
            temp2 = temp;
            if ( temp < 0 ) {
                // temp is abs value of input
                temp = -temp;
                // This code assumes we are on a two's complement machine
                temp2--;
            }
            // Find the number of bits needed for the magnitude of the coefficient
            // there must be at least one 1 bit
            nbits = 1;
            while ( (temp >>= 1) )
                nbits++;
            // Emit Huffman symbol for run length / number of bits
            int i = (r << 4) + nbits;
            if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[i], d_table_ac->size[i], put_value, put_bits, data_compressed) != 0 )
                return -1;
            // Write Category offset
            if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
                return -1;
            r = 0;
        }
    }
    // If all the left coefs were zero, emit an end-of-block code
    if ( r > 0 ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0], d_table_ac->size[0], put_value, put_bits, data_compressed) != 0 )
            return -1;
    }
    return 0;
}
/**
* Huffman encoder kernel (for CC 1.x)
*
* @return void
*/
__global__ static void
gpujpeg_huffman_encoder_encode_kernel(
    struct gpujpeg_component* d_component,
    struct gpujpeg_segment* d_segment,
    int comp_count,
    int segment_count,
    uint8_t* d_data_compressed
)
{
    // one thread serially encodes one whole restart segment
    int segment_index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // first thread initializes compact output size for next kernel
    if(0 == segment_index) {
        gpujpeg_huffman_output_byte_count = 0;
    }
    // Initialize huffman coder (bit buffer plus per-component DC predictors)
    int put_value = 0;
    int put_bits = 0;
    int dc[GPUJPEG_MAX_COMPONENT_COUNT];
    for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
        dc[comp] = 0;
    // Prepare data pointers (output goes to this segment's slot in the temp buffer)
    uint8_t* data_compressed = &d_data_compressed[segment->data_temp_index];
    uint8_t* data_compressed_start = data_compressed;
    // Non-interleaving mode
    if ( comp_count == 1 ) {
        // intentionally shadows the outer segment_index with the scan-local index
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            // Get component for current scan
            struct gpujpeg_component* component = &d_component[segment->scan_index];
            // Get component data for MCU
            int16_t* block = &component->d_data_quantized[(segment_index * component->segment_mcu_count + mcu_index) * component->mcu_size];
            // Get coder parameters
            int & component_dc = dc[segment->scan_index];
            // Get huffman tables
            struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
            struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
            if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
            } else {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
            }
            // Encode 8x8 block
            if ( gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac) != 0 )
                break;
        }
    }
    // Interleaving mode
    else {
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            //assert(segment->scan_index == 0);
            for ( int comp = 0; comp < comp_count; comp++ ) {
                struct gpujpeg_component* component = &d_component[comp];
                // Prepare mcu indexes
                int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
                int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
                // Compute base data index
                int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                // For all vertical 8x8 blocks
                for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
                    // Compute base row data index
                    int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                    // For all horizontal 8x8 blocks
                    for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
                        // Compute 8x8 block data index
                        int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
                        // Get component data for MCU
                        int16_t* block = &component->d_data_quantized[data_index];
                        // Get coder parameters
                        int & component_dc = dc[comp];
                        // Get huffman tables
                        struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
                        struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
                        if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
                        } else {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
                        }
                        // Encode 8x8 block
                        // NOTE(review): unlike the non-interleaved branch, the return
                        // value is ignored here — confirm failures cannot occur or are benign
                        gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac);
                    }
                }
            }
        }
    }
    // Emit left bits (flush the partial byte remaining in the bit buffer)
    if ( put_bits > 0 )
        gpujpeg_huffman_gpu_encoder_emit_left_bits(put_value, put_bits, data_compressed);
    // Output restart marker
    int restart_marker = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
    gpujpeg_huffman_gpu_encoder_marker(data_compressed, restart_marker);
    // Set compressed size
    segment->data_compressed_size = data_compressed - data_compressed_start;
}
/** Adds packed coefficients into the GPU version of Huffman lookup table. */
void
gpujpeg_huffman_gpu_add_packed_table(uint32_t * const dest, const struct gpujpeg_table_huffman_encoder * const src, const bool is_ac) {
    // Pack all 257 destination slots: the codeword is left-justified in the
    // high bits with its bit-length in the low bits. The index is masked to
    // 8 bits, so slot 256 duplicates slot 0.
    for ( int entry = 0; entry < 257; entry++ ) {
        const int symbol = entry & 0xFF;
        const int code_bit_count = src->size[symbol];
        dest[entry] = (src->code[symbol] << (32 - code_bit_count)) | code_bit_count;
    }
    // Slot 0 of each AC table is reserved for special purposes by the GPU kernels.
    if ( is_ac ) {
        dest[0] = 0;
    }
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_encoder_init(const struct gpujpeg_encoder * encoder)
{
    // Initialize decomposition lookup table
    cudaFuncSetCacheConfig(gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel, cudaFuncCachePreferShared);
    gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel<<<32, 256>>>();  // 8192 threads total
    // NOTE(review): cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the modern equivalent — confirm minimum supported toolkit version
    cudaThreadSynchronize();
    gpujpeg_cuda_check_error("Decomposition LUT initialization failed");
    // compose GPU version of the huffman LUT and copy it into GPU memory (for CC >= 2.0):
    // 4 packed tables of 257 entries each, in the order lum-AC, lum-DC, chroma-AC, chroma-DC
    uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4];
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 0, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 1, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC], false);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 2, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 3, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC], false);
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_lut,
        gpujpeg_huffman_cpu_lut,
        (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_lut),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman LUT copy)");
    // Copy original Huffman coding tables to GPU memory (for CC 1.x);
    // the whole 2x2 table array is copied starting at its first element
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_table_huffman,
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        sizeof(gpujpeg_huffman_gpu_encoder_table_huffman),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman coding table)");
    // Copy natural order (zig-zag index mapping) to constant device memory
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (natural order copy)");
    // Configure more shared memory for all kernels
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<true>, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<false>, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_serialization_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_compaction_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_allocation_kernel, cudaFuncCachePreferShared);
    return 0;
}
/**
* Get grid size for specified count of threadblocks. (Grid size is limited
* to 65536 in both directions, so if we need more threadblocks, we must use
* both x and y coordinates.)
*/
dim3
gpujpeg_huffman_gpu_encoder_grid_size(int tblock_count)
{
    // Start with a 1D grid and repeatedly halve the x-dimension (rounding up)
    // while doubling y, until x fits within the 65535-per-dimension limit.
    // The resulting grid holds at least tblock_count blocks; kernels are
    // expected to ignore any surplus blocks.
    unsigned int x = tblock_count;
    unsigned int y = 1;
    for ( ; x > 0xffff; y *= 2 ) {
        x = (x + 1) / 2;
    }
    return dim3(x, y);
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_encoder_encode(struct gpujpeg_encoder* encoder, unsigned int * output_byte_count)
{
    // Get coder
    struct gpujpeg_coder* coder = &encoder->coder;
    assert(coder->param.restart_interval > 0);
    // Select encoder kernel which either expects continuous segments of blocks or uses block lists
    int comp_count = 1;
    if ( coder->param.interleaved == 1 )
        comp_count = coder->param_image.comp_count;
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
    // Select encoder kernel based on compute capability
    if ( encoder->coder.cuda_cc_major < 2 ) {
        // CC 1.x path: serial per-thread encoder writing into the temp buffer
        dim3 thread(THREAD_BLOCK_SIZE);
        dim3 grid(gpujpeg_div_and_round_up(coder->segment_count, thread.x));
        gpujpeg_huffman_encoder_encode_kernel<<<grid, thread>>>(
            coder->d_component,
            coder->d_segment,
            comp_count,
            coder->segment_count,
            coder->d_temp_huffman
        );
        // NOTE(review): cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the modern equivalent
        cudaThreadSynchronize();
        gpujpeg_cuda_check_error("Huffman encoding failed");
    } else {
        // CC 2.0+ path: warp-parallel encoder (one warp per segment), followed
        // by a codeword serialization pass
        dim3 thread(32 * WARPS_NUM);
        dim3 grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, (thread.x / 32)));
        if(comp_count == 1) {
            gpujpeg_huffman_encoder_encode_kernel_warp<true><<<grid, thread>>>(
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count
            );
        } else {
            gpujpeg_huffman_encoder_encode_kernel_warp<false><<<grid, thread>>>(
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count
            );
        }
        cudaThreadSynchronize();
        gpujpeg_cuda_check_error("Huffman encoding failed");
        // Run codeword serialization kernel
        const int num_serialization_tblocks = gpujpeg_div_and_round_up(coder->segment_count, SERIALIZATION_THREADS_PER_TBLOCK);
        const dim3 serialization_grid = gpujpeg_huffman_gpu_encoder_grid_size(num_serialization_tblocks);
        gpujpeg_huffman_encoder_serialization_kernel<<<num_serialization_tblocks, SERIALIZATION_THREADS_PER_TBLOCK>>>(
            coder->d_segment,
            coder->segment_count,
            coder->d_data_compressed,
            coder->d_temp_huffman
        );
        cudaThreadSynchronize();
        gpujpeg_cuda_check_error("Codeword serialization failed");
    }
    // No atomic operations in CC 1.0 => run output size computation kernel to allocate the output buffer space
    if ( encoder->coder.cuda_cc_major == 1 && encoder->coder.cuda_cc_minor == 0 ) {
        gpujpeg_huffman_encoder_allocation_kernel<<<1, 512>>>(coder->d_segment, coder->segment_count);
        cudaThreadSynchronize();
        gpujpeg_cuda_check_error("Huffman encoder output allocation failed");
    }
    // Run output compaction kernel (one warp per segment) to pack all segments
    // contiguously into the final output buffer
    const dim3 compaction_thread(32, WARPS_NUM);
    const dim3 compaction_grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, WARPS_NUM));
    gpujpeg_huffman_encoder_compaction_kernel<<<compaction_grid, compaction_thread>>>(
        coder->d_segment,
        coder->segment_count,
        coder->d_temp_huffman,
        coder->d_data_compressed
    );
    cudaThreadSynchronize();
    gpujpeg_cuda_check_error("Huffman output compaction failed");
    // Read and return number of occupied bytes
    cudaMemcpyFromSymbol(output_byte_count, gpujpeg_huffman_output_byte_count, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost);
    gpujpeg_cuda_check_error("Huffman output size getting failed");
    // indicate success
    return 0;
}
|
f4850aa9f4953180394903caeacee9eda701d732.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Per-keypoint fern classifier update: one thread per keypoint extracts a
// square intensity patch around the keypoint, evaluates every fern (a set of
// pairwise pixel comparisons producing a binary code), histograms the codes,
// and converts the histogram into regularized probabilities.
// NOTE(review): there is no bounds check on `index`, so the launch must supply
// exactly one thread per keypoint — confirm at the call site.
__global__ void calculateCount(int *keypoints ,const unsigned char *in, float *allProbablities, int *allIndexList, int patchSize, int width, int height, int fernNum, int fernSize, int lenght, int REGULARIZATION_TERM){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // keypoint coordinates stored interleaved as (y, x) pairs
    int y = keypoints[index*2];
    int x = keypoints[index*2+1];
    // clamp the patch window to the image borders
    int startX = x - patchSize;
    int endX = x + patchSize;
    int startY = y - patchSize;
    int endY = y + patchSize;
    if(startX < 0 ){
        startX = 0;
    }
    if (endX >= width ){
        endX = width -1;
    }
    if(startY < 0 ){
        startY = 0;
    }
    if (endY >= height){
        endY = height -1;
    }
    int patchHeight = endX - startX;
    int patchLenght = patchHeight * (endY - startY);
    // local copy of the patch
    // NOTE(review): holds at most 1024 pixels, i.e. roughly patchSize <= 16;
    // larger patches would overflow this buffer — confirm the caller's limit
    int patch[1024];
    int count = 0;
    // gather the patch; `in` is indexed as in[column*height + row]
    // (column-major layout) — presumably matches the upload layout; verify
    for(int j= 0; j < patchHeight; j++){
        for(int i = startY ; i < endY; i++){
            patch[count] = in[startX*height+i];
            count++;
        }
        startX = startX +1;
    }
    int I1, I2,num, decimalNum, index2;
    // evaluate each fern: fernSize pixel-pair comparisons build a binary code
    // (decimalNum), starting from the most significant bit weight lenght/2
    for(int i = 0; i< fernNum ; i++){
        decimalNum = 0;
        num = lenght/2;
        for(int j = 0; j < fernSize; j++){
            index2 = (fernSize*i*2)+(j*2);
            I1 = allIndexList[index2];
            I2 = allIndexList[index2+1];
            if(I1 < patchLenght && I2 < patchLenght){
                if(patch[I1] < patch[I2]){
                    decimalNum = decimalNum +num;
                }
                // NOTE(review): the bit weight only advances when both probe
                // indices fall inside the patch, so out-of-range probes shift
                // the remaining bits — confirm this is the intended behaviour
                num = num /2;
            }
        }
        allProbablities[index*lenght+decimalNum] = allProbablities[index*lenght+decimalNum]+ 1;
    }
    // normalize counts into regularized (Laplace-smoothed) probabilities
    for(int i = 0; i< lenght; i++){
        float num2 = allProbablities[index*lenght+i];
        float value = (num2 + REGULARIZATION_TERM) / (fernNum + lenght*REGULARIZATION_TERM);
        allProbablities[index*lenght+i] = value;
    }
} | f4850aa9f4953180394903caeacee9eda701d732.cu | __global__ void calculateCount(int *keypoints ,const unsigned char *in, float *allProbablities, int *allIndexList, int patchSize, int width, int height, int fernNum, int fernSize, int lenght, int REGULARIZATION_TERM){
    // one thread per keypoint (no bounds guard — launch must match keypoint count)
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // keypoint coordinates stored interleaved as (y, x) pairs
    int y = keypoints[index*2];
    int x = keypoints[index*2+1];
    // clamp the patch window to the image borders
    int startX = x - patchSize;
    int endX = x + patchSize;
    int startY = y - patchSize;
    int endY = y + patchSize;
    if(startX < 0 ){
        startX = 0;
    }
    if (endX >= width ){
        endX = width -1;
    }
    if(startY < 0 ){
        startY = 0;
    }
    if (endY >= height){
        endY = height -1;
    }
    int patchHeight = endX - startX;
    int patchLenght = patchHeight * (endY - startY);
    // local copy of the patch (at most 1024 pixels — see hip variant's notes)
    int patch[1024];
    int count = 0;
    for(int j= 0; j < patchHeight; j++){
        for(int i = startY ; i < endY; i++){
            patch[count] = in[startX*height+i];
            count++;
        }
        startX = startX +1;
    }
    int I1, I2,num, decimalNum, index2;
    // evaluate each fern: pairwise pixel comparisons build a binary code
    for(int i = 0; i< fernNum ; i++){
        decimalNum = 0;
        num = lenght/2;
        for(int j = 0; j < fernSize; j++){
            index2 = (fernSize*i*2)+(j*2);
            I1 = allIndexList[index2];
            I2 = allIndexList[index2+1];
            if(I1 < patchLenght && I2 < patchLenght){
                if(patch[I1] < patch[I2]){
                    decimalNum = decimalNum +num;
                }
                num = num /2;
            }
        }
        allProbablities[index*lenght+decimalNum] = allProbablities[index*lenght+decimalNum]+ 1;
    }
    // normalize counts into regularized probabilities
    for(int i = 0; i< lenght; i++){
        float num2 = allProbablities[index*lenght+i];
        float value = (num2 + REGULARIZATION_TERM) / (fernNum + lenght*REGULARIZATION_TERM);
        allProbablities[index*lenght+i] = value;
    }
} |
071e62d44d6849241c9847758d02a046aeea7112.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
// Zeroes the first entry of the element's data and bumps the element counter.
__device__ void res_calc_gpu( double *data, int *count) {
    *data = 0.0;
    *count += 1;
}
// CUDA kernel function
// OP2-generated kernel: executes the user function res_calc_gpu over the
// elements of one plan colour, staging indirect increments through thread
// colours so that conflicting writes to ind_arg0 are serialized.
__global__ void op_cuda_res_calc(
    double *__restrict ind_arg0,
    const int *__restrict opDat0Map,
    int *arg1,
    int block_offset,
    int *blkmap,
    int *offset,
    int *nelems,
    int *ncolors,
    int *colors,
    int nblocks,
    int set_size) {
    double arg0_l[4];
    int arg1_l[1];
    // zero the thread-local reduction accumulator
    for ( int d=0; d<1; d++ ){
        arg1_l[d]=ZERO_int;
    }
    __shared__ int nelems2, ncolor;
    __shared__ int nelem, offset_b;
    extern __shared__ char shared[];
    // surplus blocks of a 2D grid exit early (condition is uniform per block)
    if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
        return;
    }
    if (threadIdx.x==0) {
        //get sizes and shift pointers and direct-mapped data
        int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
        nelem = nelems[blockId];
        offset_b = offset[blockId];
        // round element count up to a multiple of blockDim.x so the loop below
        // (and its __syncthreads calls) is executed uniformly by all threads
        nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
        ncolor = ncolors[blockId];
    }
    __syncthreads(); // make sure all of above completed
    for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
        int col2 = -1;
        // map0idx is only assigned when n<nelem; it is read only under
        // col2==col, which can hold only for threads that took that branch
        int map0idx;
        if (n<nelem) {
            //initialise local variables
            for ( int d=0; d<4; d++ ){
                arg0_l[d] = ZERO_double;
            }
            map0idx = opDat0Map[n + offset_b + set_size * 0];
            //user-supplied kernel call
            res_calc_gpu(arg0_l,
                         arg1_l);
            col2 = colors[n+offset_b];
        }
        //store local variables: colours take turns, so no two threads update
        //the same indirect location concurrently
        for ( int col=0; col<ncolor; col++ ){
            if (col2==col) {
                arg0_l[0] += ind_arg0[0+map0idx*4];
                arg0_l[1] += ind_arg0[1+map0idx*4];
                arg0_l[2] += ind_arg0[2+map0idx*4];
                arg0_l[3] += ind_arg0[3+map0idx*4];
                ind_arg0[0+map0idx*4] = arg0_l[0];
                ind_arg0[1+map0idx*4] = arg0_l[1];
                ind_arg0[2+map0idx*4] = arg0_l[2];
                ind_arg0[3+map0idx*4] = arg0_l[3];
            }
            __syncthreads();  // reached by all threads (loop bound is shared)
        }
    }
    //global reductions (per-block partial written to arg1, combined on host)
    for ( int d=0; d<1; d++ ){
        op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]);
    }
}
//host stub function
// OP2-generated host stub: builds/fetches an execution plan for the set,
// allocates per-block reduction storage, launches op_cuda_res_calc once per
// plan colour, and combines the partial reductions on the host.
void op_par_loop_res_calc(char const *name, op_set set,
    op_arg arg0,
    op_arg arg1){
    int*arg1h = (int *)arg1.data;
    int nargs = 2;
    op_arg args[2];
    args[0] = arg0;
    args[1] = arg1;
    // initialise timers
    double cpu_t1, cpu_t2, wall_t1, wall_t2;
    op_timing_realloc(0);
    op_timers_core(&cpu_t1, &wall_t1);
    OP_kernels[0].name = name;
    OP_kernels[0].count += 1;
    int ninds = 1;
    int inds[2] = {0,-1};
    if (OP_diags>2) {
        printf(" kernel routine with indirection: res_calc\n");
    }
    //get plan
    #ifdef OP_PART_SIZE_0
    int part_size = OP_PART_SIZE_0;
    #else
    int part_size = OP_part_size;
    #endif
    int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
    if (set->size > 0) {
        op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
        //transfer global reduction data to GPU: one int slot per block of the
        //largest colour, zero-initialized on the host
        int maxblocks = 0;
        for ( int col=0; col<Plan->ncolors; col++ ){
            maxblocks = MAX(maxblocks,Plan->ncolblk[col]);
        }
        int reduct_bytes = 0;
        int reduct_size = 0;
        reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
        reduct_size = MAX(reduct_size,sizeof(int));
        reallocReductArrays(reduct_bytes);
        reduct_bytes = 0;
        arg1.data = OP_reduct_h + reduct_bytes;
        arg1.data_d = OP_reduct_d + reduct_bytes;
        for ( int b=0; b<maxblocks; b++ ){
            for ( int d=0; d<1; d++ ){
                ((int *)arg1.data)[d+b*1] = ZERO_int;
            }
        }
        reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
        mvReductArraysToDevice(reduct_bytes);
        //execute plan: one kernel launch per colour; halo exchange must finish
        //before the first non-core colour runs
        int block_offset = 0;
        for ( int col=0; col<Plan->ncolors; col++ ){
            if (col==Plan->ncolors_core) {
                op_mpi_wait_all_cuda(nargs, args);
            }
            #ifdef OP_BLOCK_SIZE_0
            int nthread = OP_BLOCK_SIZE_0;
            #else
            int nthread = OP_block_size;
            #endif
            // fold block counts above 65535 into a 2D grid
            dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
                Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
            if (Plan->ncolblk[col] > 0) {
                int nshared = reduct_size*nthread;
                hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0,
                    (double *)arg0.data_d,
                    arg0.map_data_d,
                    (int*)arg1.data_d,
                    block_offset,
                    Plan->blkmap,
                    Plan->offset,
                    Plan->nelems,
                    Plan->nthrcol,
                    Plan->thrcol,
                    Plan->ncolblk[col],
                    set->size+set->exec_size);
                //transfer global reduction data back to CPU
                if (col == Plan->ncolors_owned-1) {
                    mvReductArraysToHost(reduct_bytes);
                }
            }
            block_offset += Plan->ncolblk[col];
        }
        OP_kernels[0].transfer += Plan->transfer;
        OP_kernels[0].transfer2 += Plan->transfer2;
        //combine the per-block partial reductions into the caller's scalar
        for ( int b=0; b<maxblocks; b++ ){
            for ( int d=0; d<1; d++ ){
                arg1h[d] = arg1h[d] + ((int *)arg1.data)[d+b*1];
            }
        }
        arg1.data = (char *)arg1h;
        op_mpi_reduce(&arg1,arg1h);
    }
    op_mpi_set_dirtybit_cuda(nargs, args);
    cutilSafeCall(hipDeviceSynchronize());
    //update kernel record
    op_timers_core(&cpu_t2, &wall_t2);
    OP_kernels[0].time += wall_t2 - wall_t1;
}
| 071e62d44d6849241c9847758d02a046aeea7112.cu | //
// auto-generated by op2.py
//
//user function
// Zeroes the first entry of the element's data and bumps the element counter.
__device__ void res_calc_gpu( double *data, int *count) {
    data[0] = 0.0;
    (*count)++;
}
// CUDA kernel function
// OP2-generated kernel (CUDA variant of the hip version above): runs
// res_calc_gpu over one plan colour; thread colours serialize the
// conflicting indirect updates to ind_arg0.
__global__ void op_cuda_res_calc(
    double *__restrict ind_arg0,
    const int *__restrict opDat0Map,
    int *arg1,
    int block_offset,
    int *blkmap,
    int *offset,
    int *nelems,
    int *ncolors,
    int *colors,
    int nblocks,
    int set_size) {
    double arg0_l[4];
    int arg1_l[1];
    for ( int d=0; d<1; d++ ){
        arg1_l[d]=ZERO_int;
    }
    __shared__ int nelems2, ncolor;
    __shared__ int nelem, offset_b;
    extern __shared__ char shared[];
    // surplus blocks of the 2D grid exit early (uniform per block)
    if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
        return;
    }
    if (threadIdx.x==0) {
        //get sizes and shift pointers and direct-mapped data
        int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
        nelem = nelems[blockId];
        offset_b = offset[blockId];
        // rounded up so the loop (and its barriers) is executed uniformly
        nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
        ncolor = ncolors[blockId];
    }
    __syncthreads(); // make sure all of above completed
    for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
        int col2 = -1;
        int map0idx;
        if (n<nelem) {
            //initialise local variables
            for ( int d=0; d<4; d++ ){
                arg0_l[d] = ZERO_double;
            }
            map0idx = opDat0Map[n + offset_b + set_size * 0];
            //user-supplied kernel call
            res_calc_gpu(arg0_l,
                         arg1_l);
            col2 = colors[n+offset_b];
        }
        //store local variables, one colour at a time
        for ( int col=0; col<ncolor; col++ ){
            if (col2==col) {
                arg0_l[0] += ind_arg0[0+map0idx*4];
                arg0_l[1] += ind_arg0[1+map0idx*4];
                arg0_l[2] += ind_arg0[2+map0idx*4];
                arg0_l[3] += ind_arg0[3+map0idx*4];
                ind_arg0[0+map0idx*4] = arg0_l[0];
                ind_arg0[1+map0idx*4] = arg0_l[1];
                ind_arg0[2+map0idx*4] = arg0_l[2];
                ind_arg0[3+map0idx*4] = arg0_l[3];
            }
            __syncthreads();
        }
    }
    //global reductions (per-block partials combined on the host)
    for ( int d=0; d<1; d++ ){
        op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]);
    }
}
//host stub function
// OP2-generated host stub (CUDA variant): fetches the execution plan,
// prepares per-block reduction storage, launches op_cuda_res_calc per
// plan colour, and combines partial reductions on the host.
void op_par_loop_res_calc(char const *name, op_set set,
    op_arg arg0,
    op_arg arg1){
    int*arg1h = (int *)arg1.data;
    int nargs = 2;
    op_arg args[2];
    args[0] = arg0;
    args[1] = arg1;
    // initialise timers
    double cpu_t1, cpu_t2, wall_t1, wall_t2;
    op_timing_realloc(0);
    op_timers_core(&cpu_t1, &wall_t1);
    OP_kernels[0].name = name;
    OP_kernels[0].count += 1;
    int ninds = 1;
    int inds[2] = {0,-1};
    if (OP_diags>2) {
        printf(" kernel routine with indirection: res_calc\n");
    }
    //get plan
    #ifdef OP_PART_SIZE_0
    int part_size = OP_PART_SIZE_0;
    #else
    int part_size = OP_part_size;
    #endif
    int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
    if (set->size > 0) {
        op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
        //transfer global reduction data to GPU (one int slot per block)
        int maxblocks = 0;
        for ( int col=0; col<Plan->ncolors; col++ ){
            maxblocks = MAX(maxblocks,Plan->ncolblk[col]);
        }
        int reduct_bytes = 0;
        int reduct_size = 0;
        reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
        reduct_size = MAX(reduct_size,sizeof(int));
        reallocReductArrays(reduct_bytes);
        reduct_bytes = 0;
        arg1.data = OP_reduct_h + reduct_bytes;
        arg1.data_d = OP_reduct_d + reduct_bytes;
        for ( int b=0; b<maxblocks; b++ ){
            for ( int d=0; d<1; d++ ){
                ((int *)arg1.data)[d+b*1] = ZERO_int;
            }
        }
        reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
        mvReductArraysToDevice(reduct_bytes);
        //execute plan: one launch per colour
        int block_offset = 0;
        for ( int col=0; col<Plan->ncolors; col++ ){
            if (col==Plan->ncolors_core) {
                op_mpi_wait_all_cuda(nargs, args);
            }
            #ifdef OP_BLOCK_SIZE_0
            int nthread = OP_BLOCK_SIZE_0;
            #else
            int nthread = OP_block_size;
            #endif
            // fold block counts above 65535 into a 2D grid
            dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
                Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
            if (Plan->ncolblk[col] > 0) {
                int nshared = reduct_size*nthread;
                op_cuda_res_calc<<<nblocks,nthread,nshared>>>(
                    (double *)arg0.data_d,
                    arg0.map_data_d,
                    (int*)arg1.data_d,
                    block_offset,
                    Plan->blkmap,
                    Plan->offset,
                    Plan->nelems,
                    Plan->nthrcol,
                    Plan->thrcol,
                    Plan->ncolblk[col],
                    set->size+set->exec_size);
                //transfer global reduction data back to CPU
                if (col == Plan->ncolors_owned-1) {
                    mvReductArraysToHost(reduct_bytes);
                }
            }
            block_offset += Plan->ncolblk[col];
        }
        OP_kernels[0].transfer += Plan->transfer;
        OP_kernels[0].transfer2 += Plan->transfer2;
        //combine per-block partial reductions into the caller's scalar
        for ( int b=0; b<maxblocks; b++ ){
            for ( int d=0; d<1; d++ ){
                arg1h[d] = arg1h[d] + ((int *)arg1.data)[d+b*1];
            }
        }
        arg1.data = (char *)arg1h;
        op_mpi_reduce(&arg1,arg1h);
    }
    op_mpi_set_dirtybit_cuda(nargs, args);
    cutilSafeCall(cudaDeviceSynchronize());
    //update kernel record
    op_timers_core(&cpu_t2, &wall_t2);
    OP_kernels[0].time += wall_t2 - wall_t1;
}
|
c46eebcecc6af050839e785f81a45a4cbcf2532e.hip | // !!! This is a file automatically generated by hipify!!!
#include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
#include <gdf/cffi/functions.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/iterator_adaptor.h>
#include <thrust/device_vector.h>
/*
* bit_mask_null_counts Generated using the following code
#include <iostream>
int main()
{
for (int i = 0 ; i != 256 ; i++) {
int count = 0;
for (int p = 0 ; p != 8 ; p++) {
if (i & (1 << p)) {
count++;
}
}
std::cout<<(8-count)<<", ";
}
std::cout<<std::endl;
}
*/
std::vector<gdf_valid_type> bit_mask_null_counts = { 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0 };
/*
 * Count the zero bits of `number` at bit positions [pos, 8).
 *
 * Used to discount the padding bits of the last byte of a validity
 * bitmask: when num_values is not a multiple of GDF_VALID_BITSIZE, the
 * bits at positions >= pos of the final byte do not correspond to real
 * rows and must not be counted as nulls.  pos == 0 means the mask is
 * byte-aligned, so there is nothing to discount.
 *
 * Fix: the bit test previously read `number & (number << p)`, which
 * tests an overlap of the value with a shifted copy of itself instead
 * of testing bit p; `number & (1 << p)` tests the intended bit.
 */
unsigned char gdf_num_bits_zero_after_pos(unsigned char number, int pos){
    //if pos == 0 then its aligned
    if(pos == 0){
        return 0;
    }
    unsigned char count = 0;
    for (int p = pos ; p != 8 ; p++) {
        if (number & (1 << p)) {
            count++;
        }
    }
    // (bits in [pos, 8)) minus (set bits in [pos, 8)) == zero bits there
    return (8 - pos) - count;
}
/*
 * Mark every one of num_values rows as valid (bit = 1) in valid_out.
 * Note the fill works on whole bytes, so padding bits of the final byte
 * are set as well.  out_null_count is reported as 0.
 */
gdf_error all_bitmask_on(gdf_valid_type * valid_out, gdf_size_type & out_null_count, gdf_size_type num_values, hipStream_t stream){
// number of mask bytes needed to hold num_values bits, rounded up
gdf_size_type num_chars_bitmask = ( ( num_values +( GDF_VALID_BITSIZE - 1)) / GDF_VALID_BITSIZE );
thrust::device_ptr<gdf_valid_type> valid_out_ptr = thrust::device_pointer_cast(valid_out);
gdf_valid_type max_char = 255; // byte with all 8 bits set
thrust::fill(thrust::hip::par.on(stream),thrust::detail::make_normal_iterator(valid_out_ptr),thrust::detail::make_normal_iterator(valid_out_ptr + num_chars_bitmask),max_char);
//we have no nulls so set all the bits in gdf_valid_type to 1
out_null_count = 0;
return GDF_SUCCESS;
}
/*
 * AND the left and right validity bitmasks byte-wise into valid_out,
 * then count how many of the first num_values result bits are 0 (nulls).
 * Padding bits of the last byte are discounted via
 * gdf_num_bits_zero_after_pos so they are not reported as nulls.
 */
gdf_error apply_bitmask_to_bitmask(gdf_size_type & out_null_count, gdf_valid_type * valid_out, gdf_valid_type * valid_left, gdf_valid_type * valid_right,
hipStream_t stream, gdf_size_type num_values){
// number of mask bytes covering num_values bits, rounded up
gdf_size_type num_chars_bitmask = ( ( num_values +( GDF_VALID_BITSIZE - 1)) / GDF_VALID_BITSIZE );
thrust::device_ptr<gdf_valid_type> valid_out_ptr = thrust::device_pointer_cast(valid_out);
thrust::device_ptr<gdf_valid_type> valid_left_ptr = thrust::device_pointer_cast(valid_left);
//here we are basically figuring out what is the last pointed to unsigned char that can contain part of the bitmask
thrust::device_ptr<gdf_valid_type> valid_left_end_ptr = thrust::device_pointer_cast(valid_left + num_chars_bitmask );
thrust::device_ptr<gdf_valid_type> valid_right_ptr = thrust::device_pointer_cast(valid_right);
// valid_out[k] = valid_left[k] & valid_right[k] for every mask byte
thrust::transform(thrust::hip::par.on(stream), thrust::detail::make_normal_iterator(valid_left_ptr),
thrust::detail::make_normal_iterator(valid_left_end_ptr), thrust::detail::make_normal_iterator(valid_right_ptr),
thrust::detail::make_normal_iterator(valid_out_ptr), thrust::bit_and<gdf_valid_type>());
// fetch the last result byte so the host can discount its padding bits
char * last_char = new char[1];
// NOTE(review): `error` is never checked — a failed copy would go unnoticed
hipError_t error = hipMemcpyAsync(last_char,valid_out + ( num_chars_bitmask-1),sizeof(gdf_valid_type),hipMemcpyDeviceToHost,stream);
// per-call upload of the 256-entry "zero bits per byte" lookup table
thrust::device_vector<gdf_valid_type> bit_mask_null_counts_device(bit_mask_null_counts);
//this permutation iterator makes it so that each char basically gets replaced with its number of null counts
//so if you sum up this perm iterator you add up all of the counts for null values per unsigned char
thrust::permutation_iterator<thrust::device_vector<gdf_valid_type>::iterator,thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > >
null_counts_iter( bit_mask_null_counts_device.begin(),thrust::detail::make_normal_iterator(valid_out_ptr));
//you will notice that we subtract the number of zeros we found in the last character
// NOTE(review): *last_char is dereferenced without an explicit stream sync;
// this relies on the blocking thrust::reduce on the same stream having
// drained the earlier async copy — confirm this assumption holds
out_null_count = thrust::reduce(thrust::hip::par.on(stream),null_counts_iter, null_counts_iter + num_chars_bitmask) - gdf_num_bits_zero_after_pos(*last_char,num_values % GDF_VALID_BITSIZE );
delete[] last_char;
return GDF_SUCCESS;
}
| c46eebcecc6af050839e785f81a45a4cbcf2532e.cu | #include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
#include <gdf/cffi/functions.h>
#include <cuda_runtime.h>
#include <vector>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/iterator_adaptor.h>
#include <thrust/device_vector.h>
/*
* bit_mask_null_counts Generated using the following code
#include <iostream>
int main()
{
for (int i = 0 ; i != 256 ; i++) {
int count = 0;
for (int p = 0 ; p != 8 ; p++) {
if (i & (1 << p)) {
count++;
}
}
std::cout<<(8-count)<<", ";
}
std::cout<<std::endl;
}
*/
std::vector<gdf_valid_type> bit_mask_null_counts = { 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0 };
/*
 * Count the zero bits of `number` at bit positions [pos, 8).
 *
 * Used to discount the padding bits of the last byte of a validity
 * bitmask: when num_values is not a multiple of GDF_VALID_BITSIZE, the
 * bits at positions >= pos of the final byte do not correspond to real
 * rows and must not be counted as nulls.  pos == 0 means the mask is
 * byte-aligned, so there is nothing to discount.
 *
 * Fix: the bit test previously read `number & (number << p)`, which
 * tests an overlap of the value with a shifted copy of itself instead
 * of testing bit p; `number & (1 << p)` tests the intended bit.
 */
unsigned char gdf_num_bits_zero_after_pos(unsigned char number, int pos){
    //if pos == 0 then its aligned
    if(pos == 0){
        return 0;
    }
    unsigned char count = 0;
    for (int p = pos ; p != 8 ; p++) {
        if (number & (1 << p)) {
            count++;
        }
    }
    // (bits in [pos, 8)) minus (set bits in [pos, 8)) == zero bits there
    return (8 - pos) - count;
}
/*
 * Mark every one of num_values rows as valid (bit = 1) in valid_out.
 * Note the fill works on whole bytes, so padding bits of the final byte
 * are set as well.  out_null_count is reported as 0.
 */
gdf_error all_bitmask_on(gdf_valid_type * valid_out, gdf_size_type & out_null_count, gdf_size_type num_values, cudaStream_t stream){
// number of mask bytes needed to hold num_values bits, rounded up
gdf_size_type num_chars_bitmask = ( ( num_values +( GDF_VALID_BITSIZE - 1)) / GDF_VALID_BITSIZE );
thrust::device_ptr<gdf_valid_type> valid_out_ptr = thrust::device_pointer_cast(valid_out);
gdf_valid_type max_char = 255; // byte with all 8 bits set
thrust::fill(thrust::cuda::par.on(stream),thrust::detail::make_normal_iterator(valid_out_ptr),thrust::detail::make_normal_iterator(valid_out_ptr + num_chars_bitmask),max_char);
//we have no nulls so set all the bits in gdf_valid_type to 1
out_null_count = 0;
return GDF_SUCCESS;
}
/*
 * AND the left and right validity bitmasks byte-wise into valid_out,
 * then count how many of the first num_values result bits are 0 (nulls).
 * Padding bits of the last byte are discounted via
 * gdf_num_bits_zero_after_pos so they are not reported as nulls.
 */
gdf_error apply_bitmask_to_bitmask(gdf_size_type & out_null_count, gdf_valid_type * valid_out, gdf_valid_type * valid_left, gdf_valid_type * valid_right,
cudaStream_t stream, gdf_size_type num_values){
// number of mask bytes covering num_values bits, rounded up
gdf_size_type num_chars_bitmask = ( ( num_values +( GDF_VALID_BITSIZE - 1)) / GDF_VALID_BITSIZE );
thrust::device_ptr<gdf_valid_type> valid_out_ptr = thrust::device_pointer_cast(valid_out);
thrust::device_ptr<gdf_valid_type> valid_left_ptr = thrust::device_pointer_cast(valid_left);
//here we are basically figuring out what is the last pointed to unsigned char that can contain part of the bitmask
thrust::device_ptr<gdf_valid_type> valid_left_end_ptr = thrust::device_pointer_cast(valid_left + num_chars_bitmask );
thrust::device_ptr<gdf_valid_type> valid_right_ptr = thrust::device_pointer_cast(valid_right);
// valid_out[k] = valid_left[k] & valid_right[k] for every mask byte
thrust::transform(thrust::cuda::par.on(stream), thrust::detail::make_normal_iterator(valid_left_ptr),
thrust::detail::make_normal_iterator(valid_left_end_ptr), thrust::detail::make_normal_iterator(valid_right_ptr),
thrust::detail::make_normal_iterator(valid_out_ptr), thrust::bit_and<gdf_valid_type>());
// fetch the last result byte so the host can discount its padding bits
char * last_char = new char[1];
// NOTE(review): `error` is never checked — a failed copy would go unnoticed
cudaError_t error = cudaMemcpyAsync(last_char,valid_out + ( num_chars_bitmask-1),sizeof(gdf_valid_type),cudaMemcpyDeviceToHost,stream);
// per-call upload of the 256-entry "zero bits per byte" lookup table
thrust::device_vector<gdf_valid_type> bit_mask_null_counts_device(bit_mask_null_counts);
//this permutation iterator makes it so that each char basically gets replaced with its number of null counts
//so if you sum up this perm iterator you add up all of the counts for null values per unsigned char
thrust::permutation_iterator<thrust::device_vector<gdf_valid_type>::iterator,thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > >
null_counts_iter( bit_mask_null_counts_device.begin(),thrust::detail::make_normal_iterator(valid_out_ptr));
//you will notice that we subtract the number of zeros we found in the last character
// NOTE(review): *last_char is dereferenced without an explicit stream sync;
// this relies on the blocking thrust::reduce on the same stream having
// drained the earlier async copy — confirm this assumption holds
out_null_count = thrust::reduce(thrust::cuda::par.on(stream),null_counts_iter, null_counts_iter + num_chars_bitmask) - gdf_num_bits_zero_after_pos(*last_char,num_values % GDF_VALID_BITSIZE );
delete[] last_char;
return GDF_SUCCESS;
}
|
28fc446bf1870e618436c98bdd31a6f0c741a080.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Include files
// Parameters
#define N_ATOMS 343
#define MASS_ATOM 1.0f
#define time_step 0.01f
#define L 10.5f
#define T 0.728f
#define NUM_STEPS 10000
const int BLOCK_SIZE = 1024;
//const int L = ;
const int scheme = 1; // 0 for explicit, 1 for implicit
/*************************************************************************************************************/
/************* INITIALIZATION CODE **********/
/*************************************************************************************************************/
// Wrap coordinate r back into the periodic simulation box [-L/2, L/2].
// (2 * (r < 0) - 1) evaluates to +1 for negative r and -1 for positive r,
// so the shift always moves r toward the box; ceil(...) picks how many
// whole box lengths L are needed.  Note the mixed double (L / 2.0) and
// float (L / 2.0f) constants in the comparison vs. the shift.
__device__ float PutInBox(float r){
if (fabs(r) > L / 2.0)
r += (2 * (r < 0) - 1)*ceil((fabs(r) - L / 2.0f) / L)*L;
return r;
}
// Advance positions and velocities by one explicit time step.
// One thread per scalar coordinate; len = total number of coordinates.
__global__ void kinematics(float* positions, float* force, float* vel, int len){
int tx = threadIdx.x;
int bx = blockIdx.x;
// flat global index of this thread's coordinate
int index = bx*blockDim.x + tx;
float tempr;
//if (index == 0){ printf("You have been trolled! \n"); }
if (index < len){
// x(t+dt) = x(t) + 0.5*(F/m)*dt^2 + v*dt, then wrap into the periodic box
tempr = positions[index] + 0.5f * force[index] / MASS_ATOM * time_step*time_step + vel[index] * time_step;
positions[index] = PutInBox(tempr);
// v(t+dt) = v(t) + (F/m)*dt
vel[index] += force[index] / MASS_ATOM * time_step;
}
} | 28fc446bf1870e618436c98bdd31a6f0c741a080.cu | #include "includes.h"
// Include files
// Parameters
#define N_ATOMS 343
#define MASS_ATOM 1.0f
#define time_step 0.01f
#define L 10.5f
#define T 0.728f
#define NUM_STEPS 10000
const int BLOCK_SIZE = 1024;
//const int L = ;
const int scheme = 1; // 0 for explicit, 1 for implicit
/*************************************************************************************************************/
/************* INITIALIZATION CODE **********/
/*************************************************************************************************************/
// Wrap coordinate r back into the periodic simulation box [-L/2, L/2].
// (2 * (r < 0) - 1) evaluates to +1 for negative r and -1 for positive r,
// so the shift always moves r toward the box; ceil(...) picks how many
// whole box lengths L are needed.  Note the mixed double (L / 2.0) and
// float (L / 2.0f) constants in the comparison vs. the shift.
__device__ float PutInBox(float r){
if (fabs(r) > L / 2.0)
r += (2 * (r < 0) - 1)*ceil((fabs(r) - L / 2.0f) / L)*L;
return r;
}
// Advance positions and velocities by one explicit time step.
// One thread per scalar coordinate; len = total number of coordinates.
__global__ void kinematics(float* positions, float* force, float* vel, int len){
int tx = threadIdx.x;
int bx = blockIdx.x;
// flat global index of this thread's coordinate
int index = bx*blockDim.x + tx;
float tempr;
//if (index == 0){ printf("You have been trolled! \n"); }
if (index < len){
// x(t+dt) = x(t) + 0.5*(F/m)*dt^2 + v*dt, then wrap into the periodic box
tempr = positions[index] + 0.5f * force[index] / MASS_ATOM * time_step*time_step + vel[index] * time_step;
positions[index] = PutInBox(tempr);
// v(t+dt) = v(t) + (F/m)*dt
vel[index] += force[index] / MASS_ATOM * time_step;
}
} |
6246af87a0369f1daa3b318248c7ced104ae5f9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/prelu_op.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
// Forward PReLU with one shared slope:
// Y[i] = X[i] when X[i] > 0, W[0] * X[i] when X[i] < 0 (X == 0 yields 0).
template <typename T>
__global__ void PReluKernel(const int N, const T* X, const T* W, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
// branchless select between the identity and the scaled branch
Y[i] = (X[i] > 0) * X[i] + (X[i] < 0) * X[i] * W[0];
}
}
// Forward PReLU, NCHW layout: one learned slope per channel.
// i indexes the flattened N*C*dim tensor, where dim is the spatial size.
template <typename T>
__global__ void PReluKernelNCHW(
const int N,
const int C,
const int dim,
const T* X,
const T* W,
T* Y) {
CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
// recover the channel of element i in NCHW order
int c = (i / dim) % C;
Y[i] = (X[i] > 0) * X[i] + (X[i] < 0) * X[i] * W[c];
}
}
// Forward PReLU, NHWC layout: the channel is the fastest-varying index,
// so element i belongs to channel i % C.
template <typename T>
__global__ void
PReluKernelNHWC(const int nitems, const int C, const T* X, const T* W, T* Y) {
CUDA_1D_KERNEL_LOOP(i, nitems) {
int c = i % C;
Y[i] = (X[i] > 0) * X[i] + (X[i] < 0) * X[i] * W[c];
}
}
// Backward PReLU w.r.t. the input, shared slope:
// dX[i] = dY[i] when X[i] > 0, W[0] * dY[i] otherwise.
// Note X == 0 takes the slope branch here (<=), unlike the forward pass.
template <typename T>
__global__ void
PReluGradientKernel(const int N, const T* X, const T* W, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] > 0) * dY[i] + (X[i] <= 0) * dY[i] * W[0];
}
}
// Backward PReLU w.r.t. the input, NCHW layout, per-channel slopes.
// dim is the spatial size; the channel of element i is (i / dim) % C.
template <typename T>
__global__ void PReluGradientKernelNCHW(
const int N,
const int C,
const int dim,
const T* X,
const T* W,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
int c = (i / dim) % C;
dX[i] = (X[i] > 0) * dY[i] + (X[i] <= 0) * dY[i] * W[c];
}
}
// Backward PReLU w.r.t. the input, NHWC layout, per-channel slopes.
// The channel of element i is simply i % C.
template <typename T>
__global__ void PReluGradientKernelNHWC(
const int nitems,
const int C,
const T* X,
const T* W,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, nitems) {
int c = i % C;
dX[i] = (X[i] > 0) * dY[i] + (X[i] <= 0) * dY[i] * W[c];
}
}
// Backward PReLU w.r.t. the shared scalar slope:
// *dW = sum over all i with X[i] <= 0 of dY[i] * X[i].
// Launched with a single block (see RunOnDevice); each thread strides
// over the data and partials are combined with a cub-style BlockReduce.
template <typename T>
__global__ void PReluSharedWGradientKernelNCHW(
const int num_items,
const T* Xdata,
const T* dYdata,
T* dW) {
T wsum = 0.0;
for (int i = threadIdx.x; i < num_items; i += blockDim.x) {
wsum += (Xdata[i] <= 0) * dYdata[i] * Xdata[i];
}
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(wsum);
// only thread 0 holds the valid block-reduced total
if (threadIdx.x == 0) {
*dW = sum;
}
}
// Backward PReLU w.r.t. per-channel slopes, NCHW layout.
// One block per channel (c = blockIdx.x); threads stride over every
// element of that channel across all N samples, and the partial sums
// are combined with a BlockReduce into dW[c].
template <typename T>
__global__ void PReluWGradientKernelNCHW(
const int C,
const int N,
const int num_items,
const T* Xdata,
const T* dYdata,
T* dW) {
int c = blockIdx.x;
T wsum = 0.0;
int items_per_channel = num_items / C;
int items_per_sample_channel = items_per_channel / N;
for (int i = threadIdx.x; i < items_per_channel; i += blockDim.x) {
// TODO: simplify
// map channel-local index i to the flat NCHW offset:
// sample n, channel c, spatial position i % items_per_sample_channel
int n = i / items_per_sample_channel;
int ii = n * items_per_sample_channel * C + c * items_per_sample_channel +
i % items_per_sample_channel;
wsum += (Xdata[ii] <= 0) * dYdata[ii] * Xdata[ii];
}
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(wsum);
if (threadIdx.x == 0) {
dW[c] = sum;
}
}
// Backward PReLU w.r.t. per-channel slopes, NHWC layout.
// One block per channel (c = blockIdx.x); channel elements sit at
// stride C, and the partials are block-reduced into dW[c].
template <typename T>
__global__ void PReluWGradientKernelNHWC(
const int C,
const int N,
const int num_items,
const T* Xdata,
const T* dYdata,
T* dW) {
int c = blockIdx.x;
T wsum = 0.0;
int items_per_channel = num_items / C;
for (int i = threadIdx.x; i < items_per_channel; i += blockDim.x) {
int ii = i * C + c;
wsum += (Xdata[ii] <= 0) * dYdata[ii] * Xdata[ii];
}
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(wsum);
if (threadIdx.x == 0) {
dW[c] = sum;
}
}
} // namespace
// Forward-pass dispatcher: chooses the kernel variant based on whether
// the slope W is a single shared scalar (W.numel() == 1) or per-channel,
// and on the storage order (NCHW vs NHWC).  Always returns true; launch
// failures are surfaced by C10_HIP_KERNEL_LAUNCH_CHECK.
template <>
bool PReluOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto& W = Input(1);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
const auto* Xdata = X.data<float>();
const auto* Wdata = W.data<float>();
auto* Ydata = Y->template mutable_data<float>();
// channel axis: dim 1 for NCHW, last dim for NHWC
const auto C = order_ == StorageOrder::NCHW ? X.dim(1) : X.dim(X.dim() - 1);
const auto C_shared = (W.numel() == 1);
if (!C_shared) {
// per-channel slopes must match the channel count
CAFFE_ENFORCE_EQ(C, W.numel());
}
if (C_shared) {
// shared slope: layout is irrelevant, use the flat kernel
hipLaunchKernelGGL(( PReluKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X.numel(), Xdata, Wdata, Ydata);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
// non-shared case.
switch (order_) {
case StorageOrder::NCHW: {
const auto N = X.dim(0);
const auto dim = X.size_from_dim(2);
CHECK(N * C * dim == X.numel());
hipLaunchKernelGGL(( PReluKernelNCHW),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, C, dim, Xdata, Wdata, Ydata);
C10_HIP_KERNEL_LAUNCH_CHECK();
break;
}
case StorageOrder::NHWC: {
hipLaunchKernelGGL(( PReluKernelNHWC),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X.numel(), C, Xdata, Wdata, Ydata);
C10_HIP_KERNEL_LAUNCH_CHECK();
break;
}
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
return true;
}
// Backward-pass dispatcher.  Inputs: Y (forward output), dY, X, W.
// Outputs: dX (input gradient) and dW (slope gradient).  For each
// layout it launches the W-gradient reduction kernel (one block per
// channel, or a single block for the shared-slope case) followed by the
// element-wise input-gradient kernel.
template <>
bool PReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto& X = Input(2);
auto& W = Input(3);
CAFFE_ENFORCE(&Y != &X, "Cannot backpropagate through an in-place PReLU");
DCHECK_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
auto* dW = Output(1, W.sizes(), at::dtype<float>());
// channel axis: dim 1 for NCHW, last dim for NHWC
const auto C = order_ == StorageOrder::NCHW ? X.dim(1) : X.dim(X.dim() - 1);
const auto C_shared = (W.numel() == 1);
// NOTE(review): Ydata is fetched but not passed to any launched kernel
const float* Ydata = Y.data<float>();
const float* dYdata = dY.data<float>();
const float* Xdata = X.data<float>();
const float* Wdata = W.data<float>();
float* dXdata = dX->template mutable_data<float>();
float* dWdata = dW->template mutable_data<float>();
int N = Y.dim(0);
if (C_shared) {
// single block strided-reduces dW, then flat input-gradient kernel
hipLaunchKernelGGL(( PReluSharedWGradientKernelNCHW),
dim3(1),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X.numel(), Xdata, dYdata, dWdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( PReluGradientKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X.numel(), Xdata, Wdata, dYdata, dXdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
// non-shared case.
switch (order_) {
case StorageOrder::NCHW: {
const auto dim = Y.size_from_dim(2);
hipLaunchKernelGGL(( PReluWGradientKernelNCHW),
dim3(C),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), C, N, X.numel(), Xdata, dYdata, dWdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( PReluGradientKernelNCHW),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, C, dim, Xdata, Wdata, dYdata, dXdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
break;
}
case StorageOrder::NHWC: {
hipLaunchKernelGGL(( PReluWGradientKernelNHWC),
dim3(C),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), C, N, X.numel(), Xdata, dYdata, dWdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( PReluGradientKernelNHWC),
dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), X.numel(), C, Xdata, Wdata, dYdata, dXdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
break;
}
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
return true;
}
REGISTER_CUDA_OPERATOR(PRelu, PReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PReluGradient, PReluGradientOp<float, CUDAContext>);
} // namespace caffe2
| 6246af87a0369f1daa3b318248c7ced104ae5f9f.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/prelu_op.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <cub/block/block_reduce.cuh>
namespace caffe2 {
namespace {
template <typename T>
__global__ void PReluKernel(const int N, const T* X, const T* W, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = (X[i] > 0) * X[i] + (X[i] < 0) * X[i] * W[0];
}
}
template <typename T>
__global__ void PReluKernelNCHW(
const int N,
const int C,
const int dim,
const T* X,
const T* W,
T* Y) {
CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
int c = (i / dim) % C;
Y[i] = (X[i] > 0) * X[i] + (X[i] < 0) * X[i] * W[c];
}
}
template <typename T>
__global__ void
PReluKernelNHWC(const int nitems, const int C, const T* X, const T* W, T* Y) {
CUDA_1D_KERNEL_LOOP(i, nitems) {
int c = i % C;
Y[i] = (X[i] > 0) * X[i] + (X[i] < 0) * X[i] * W[c];
}
}
template <typename T>
__global__ void
PReluGradientKernel(const int N, const T* X, const T* W, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] > 0) * dY[i] + (X[i] <= 0) * dY[i] * W[0];
}
}
template <typename T>
__global__ void PReluGradientKernelNCHW(
const int N,
const int C,
const int dim,
const T* X,
const T* W,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
int c = (i / dim) % C;
dX[i] = (X[i] > 0) * dY[i] + (X[i] <= 0) * dY[i] * W[c];
}
}
template <typename T>
__global__ void PReluGradientKernelNHWC(
const int nitems,
const int C,
const T* X,
const T* W,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, nitems) {
int c = i % C;
dX[i] = (X[i] > 0) * dY[i] + (X[i] <= 0) * dY[i] * W[c];
}
}
template <typename T>
__global__ void PReluSharedWGradientKernelNCHW(
const int num_items,
const T* Xdata,
const T* dYdata,
T* dW) {
T wsum = 0.0;
for (int i = threadIdx.x; i < num_items; i += blockDim.x) {
wsum += (Xdata[i] <= 0) * dYdata[i] * Xdata[i];
}
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(wsum);
if (threadIdx.x == 0) {
*dW = sum;
}
}
template <typename T>
__global__ void PReluWGradientKernelNCHW(
const int C,
const int N,
const int num_items,
const T* Xdata,
const T* dYdata,
T* dW) {
int c = blockIdx.x;
T wsum = 0.0;
int items_per_channel = num_items / C;
int items_per_sample_channel = items_per_channel / N;
for (int i = threadIdx.x; i < items_per_channel; i += blockDim.x) {
// TODO: simplify
int n = i / items_per_sample_channel;
int ii = n * items_per_sample_channel * C + c * items_per_sample_channel +
i % items_per_sample_channel;
wsum += (Xdata[ii] <= 0) * dYdata[ii] * Xdata[ii];
}
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(wsum);
if (threadIdx.x == 0) {
dW[c] = sum;
}
}
template <typename T>
__global__ void PReluWGradientKernelNHWC(
const int C,
const int N,
const int num_items,
const T* Xdata,
const T* dYdata,
T* dW) {
int c = blockIdx.x;
T wsum = 0.0;
int items_per_channel = num_items / C;
for (int i = threadIdx.x; i < items_per_channel; i += blockDim.x) {
int ii = i * C + c;
wsum += (Xdata[ii] <= 0) * dYdata[ii] * Xdata[ii];
}
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(wsum);
if (threadIdx.x == 0) {
dW[c] = sum;
}
}
} // namespace
template <>
bool PReluOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto& W = Input(1);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
const auto* Xdata = X.data<float>();
const auto* Wdata = W.data<float>();
auto* Ydata = Y->template mutable_data<float>();
const auto C = order_ == StorageOrder::NCHW ? X.dim(1) : X.dim(X.dim() - 1);
const auto C_shared = (W.numel() == 1);
if (!C_shared) {
CAFFE_ENFORCE_EQ(C, W.numel());
}
if (C_shared) {
PReluKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X.numel(), Xdata, Wdata, Ydata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
// non-shared case.
switch (order_) {
case StorageOrder::NCHW: {
const auto N = X.dim(0);
const auto dim = X.size_from_dim(2);
CHECK(N * C * dim == X.numel());
PReluKernelNCHW<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, C, dim, Xdata, Wdata, Ydata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
}
case StorageOrder::NHWC: {
PReluKernelNHWC<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X.numel(), C, Xdata, Wdata, Ydata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
}
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
return true;
}
template <>
bool PReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto& X = Input(2);
auto& W = Input(3);
CAFFE_ENFORCE(&Y != &X, "Cannot backpropagate through an in-place PReLU");
DCHECK_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
auto* dW = Output(1, W.sizes(), at::dtype<float>());
const auto C = order_ == StorageOrder::NCHW ? X.dim(1) : X.dim(X.dim() - 1);
const auto C_shared = (W.numel() == 1);
const float* Ydata = Y.data<float>();
const float* dYdata = dY.data<float>();
const float* Xdata = X.data<float>();
const float* Wdata = W.data<float>();
float* dXdata = dX->template mutable_data<float>();
float* dWdata = dW->template mutable_data<float>();
int N = Y.dim(0);
if (C_shared) {
PReluSharedWGradientKernelNCHW<<<
1,
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X.numel(), Xdata, dYdata, dWdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
PReluGradientKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X.numel(), Xdata, Wdata, dYdata, dXdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
// non-shared case.
switch (order_) {
case StorageOrder::NCHW: {
const auto dim = Y.size_from_dim(2);
PReluWGradientKernelNCHW<<<
C,
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(C, N, X.numel(), Xdata, dYdata, dWdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
PReluGradientKernelNCHW<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, C, dim, Xdata, Wdata, dYdata, dXdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
}
case StorageOrder::NHWC: {
PReluWGradientKernelNHWC<<<
C,
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(C, N, X.numel(), Xdata, dYdata, dWdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
PReluGradientKernelNHWC<<<
CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(X.numel(), C, Xdata, Wdata, dYdata, dXdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
}
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
return true;
}
REGISTER_CUDA_OPERATOR(PRelu, PReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PReluGradient, PReluGradientOp<float, CUDAContext>);
} // namespace caffe2
|
da8b78b7d96175f666a0f6f57b3d19f97171dd5e.hip | // !!! This is a file automatically generated by hipify!!!
/* compile with: nvcc -O3 hw1.cu -o hw1 */
#include <stdio.h>
#include <sys/time.h>
#include <hip/hip_runtime_api.h>
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define IMAGE_SIZE 1024
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
hipError_t e = f; \
if (e != hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
/* Current wall-clock time in milliseconds (gettimeofday-based). */
static inline double get_time_msec(void) {
    struct timeval now;
    gettimeofday(&now, NULL);
    double msec = now.tv_sec * 1e+3;
    msec += now.tv_usec * 1e-3;
    return msec;
}
/* we won't load actual files. just fill the images with random bytes */
/* Fill both image arrays with pseudo-random bytes.  The seed is fixed
 * (srand(0)) so every run produces the same "images". */
void load_image_pairs(uchar *images1, uchar *images2) {
    srand(0);
    const int total_pixels = N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION;
    for (int idx = 0; idx < total_pixels; idx++) {
        /* keep the rand() call order: images1 first, then images2 */
        images1[idx] = rand() % 256;
        images2[idx] = rand() % 256;
    }
}
/* True iff (i, j) lies inside the IMG_DIMENSION x IMG_DIMENSION image. */
__host__ __device__ bool is_in_image_bounds(int i, int j) {
    return i >= 0 && i < IMG_DIMENSION && j >= 0 && j < IMG_DIMENSION;
}
// 8-bit local binary pattern of pixel (i, j): each of the 8 neighbors
// (clockwise from top-left, MSB first) contributes a 1 bit when its
// value is >= the center pixel.  Neighbors outside the image contribute 0.
__host__ __device__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j    )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j    )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i    , j + 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j    )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j    )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i    , j - 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
//__device__ void zero
/* CPU reference: build the 256-bin LBP histogram of one
 * IMG_DIMENSION x IMG_DIMENSION image into `histogram`. */
void image_to_histogram(uchar *image, int *histogram) {
    for (int bin = 0; bin < 256; bin++)
        histogram[bin] = 0;
    for (int row = 0; row < IMG_DIMENSION; row++) {
        for (int col = 0; col < IMG_DIMENSION; col++) {
            histogram[local_binary_pattern(image, row, col)]++;
        }
    }
}
/* Chi-square distance between two 256-bin histograms:
 * sum over bins of (h1 - h2)^2 / (h1 + h2), skipping empty bins. */
double histogram_distance(int *h1, int *h2) {
    double total = 0;
    for (int bin = 0; bin < 256; bin++) {
        int diff = h1[bin] - h2[bin];
        int denom = h1[bin] + h2[bin];
        if (denom != 0) {
            total += ((double)(diff * diff)) / denom;
        }
    }
    return total;
}
/* Your __device__ functions and __global__ kernels here */
/* ... */
// One 32x32 thread block per image: each thread computes the LBP code of
// its pixel (i = threadIdx.x, j = threadIdx.y) and bumps the matching bin
// of the 256-entry global histogram with an atomic add.
// hist1 must be zeroed before launch — this kernel only increments.
__global__ void image_to_hisogram_simple(uchar *image1, OUT int *hist1) {
int i = threadIdx.x;
int j = threadIdx.y;
uchar pattern = local_binary_pattern(image1, i, j);
atomicAdd(hist1+pattern,1);
// __threadfence();
}
/*
 * Chi-square distance between two 256-bin histograms.
 * Launch configuration: a single block of exactly 256 threads (one per bin).
 *
 * Fixes vs. the previous version:
 *  - every thread wrote `*distance = 0` while other threads could already
 *    be accumulating, a data race that could wipe partial sums;
 *  - the accumulation used atomicAdd on a float alias of the double
 *    result, so the host read back a corrupted double.
 * The per-bin terms are now reduced in shared memory and thread 0 writes
 * the final double once, so no atomics (and no pre-zeroing) are needed.
 */
__global__ void histogram_distance(int *hist1, int *hist2, OUT double *distance) {
    __shared__ double partial[256];
    int i = threadIdx.x;
    double term = 0.0;
    if (hist1[i] + hist2[i] != 0) {
        term = ((double)SQR(hist1[i] - hist2[i])) / (hist1[i] + hist2[i]);
    }
    partial[i] = term;
    __syncthreads();
    if (i == 0) {
        double sum = 0.0;
        for (int b = 0; b < 256; b++) {
            sum += partial[b];
        }
        *distance = sum;
    }
}
/*
 * LBP histogram of one image using shared memory for both the image tile
 * and the histogram.  One 32x32 block per image.
 *
 * Fixes vs. the previous version: it called `threadfence()` — an
 * undeclared identifier (and even `__threadfence()` is only a memory
 * fence, not a barrier).  Reading the staged image / zeroed histogram,
 * and later the accumulated histogram, requires block-wide
 * __syncthreads() barriers.
 */
__global__ void image_to_hisogram_shared(uchar *image1, OUT int *hist1) {
    int i = threadIdx.x;
    int j = threadIdx.y;
    int t = i * 32 + j;  /* flat index of this thread within the block */
    __shared__ uchar im[IMAGE_SIZE];
    __shared__ int sharedHist[256];
    /* stage the image and zero the histogram */
    if (t < 256) {
        sharedHist[t] = 0;
    }
    im[t] = image1[t];
    __syncthreads();  /* all writes visible before anyone reads im/sharedHist */
    uchar pattern = local_binary_pattern(im, i, j);
    atomicAdd(sharedHist + pattern, 1);
    __syncthreads();  /* all increments done before copying out */
    if (t < 256) {
        hist1[t] = sharedHist[t];
    }
}
/*
 * Batched LBP histograms: block k (blockIdx.x) processes image k of
 * `images` and writes its 256-bin histogram to hist1[k*256 .. k*256+255].
 * One 32x32 block per image.
 *
 * Fixes vs. the previous version: `threadfence()` / `syncthreads()` were
 * undeclared identifiers, and the shared histogram was read back before
 * all atomic increments had finished.  Proper __syncthreads() barriers
 * are used instead.
 */
__global__ void image_to_hisogram_batched(uchar *images, OUT int *hist1) {
    int i = threadIdx.x;
    int j = threadIdx.y;
    int k = blockIdx.x;
    int t = i * 32 + j;  /* flat index of this thread within the block */
    __shared__ uchar im[IMAGE_SIZE];
    __shared__ int sharedHist[256];
    im[t] = images[k * IMAGE_SIZE + t];
    if (t < 256) {
        sharedHist[t] = 0;
    }
    __syncthreads();  /* staging + zeroing visible to the whole block */
    uchar pattern = local_binary_pattern(im, i, j);
    atomicAdd(sharedHist + pattern, 1);
    __syncthreads();  /* all increments done before copying out */
    if (t < 256) {
        hist1[k * 256 + t] = sharedHist[t];
    }
}
/*
 * Sum of chi-square distances over a batch of histogram pairs.
 * Block k (256 threads) handles pair k: bins hist1/hist2[k*256 + i].
 *
 * Contract change (required for correctness): *distance must be zeroed
 * by the caller (e.g. hipMemset) before launch.  The old in-kernel
 * `*distance = 0` raced across blocks — a late block could wipe sums
 * already contributed by earlier blocks — and the accumulation went
 * through atomicAdd on a float alias of the double, corrupting the
 * value read back on the host.  Each block now reduces its 256 terms in
 * shared memory and issues a single double atomicAdd (requires compute
 * capability 6.0+ for atomicAdd(double*, double)).
 */
__global__ void histogram_distance_batched(int *hist1, int *hist2, OUT double *distance) {
    __shared__ double partial[256];
    int i = threadIdx.x;
    int k = blockIdx.x;
    int idx = 256 * k + i;
    double term = 0.0;
    if (hist1[idx] + hist2[idx] != 0) {
        term = ((double)SQR(hist1[idx] - hist2[idx])) / (hist1[idx] + hist2[idx]);
    }
    partial[i] = term;
    __syncthreads();
    if (i == 0) {
        double blockSum = 0.0;
        for (int b = 0; b < 256; b++) {
            blockSum += partial[b];
        }
        atomicAdd(distance, blockSum);  /* one atomic per block; SM60+ */
    }
}
int main() {
uchar *images1; /* we concatenate all images in one huge array */
uchar *images2;
CUDA_CHECK( hipHostMalloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( hipHostMalloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance;
/* using CPU */
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < N_IMG_PAIRS; i++) {
image_to_histogram(&images1[i * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[i * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
printf("total time %f [msec]\n", t_finish - t_start);
/* using GPU task-serial */
printf("\n=== GPU Task Serial ===\n");
do {
//* do {} while (0): to keep variables inside this block in their own scope. remove if you prefer otherwise *//*
//* Your Code Here *//*
uchar *gpu_image1, *gpu_image2; // TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_image1,1024*sizeof(uchar)));
CUDA_CHECK(hipMalloc(&gpu_image2,1024*sizeof(uchar)));
int *gpu_hist1, *gpu_hist2; // TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_hist1,256*sizeof(int)));
CUDA_CHECK(hipMalloc(&gpu_hist2,256*sizeof(int)));
CUDA_CHECK(hipMemset(gpu_hist1,0,256*sizeof(int)));
CUDA_CHECK(hipMemset(gpu_hist2,0,256*sizeof(int)));
double *gpu_hist_distance; //TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_hist_distance,sizeof(double)));
double cpu_hist_distance;
t_start = get_time_msec();
hipProfilerStart();
for (int i = 0; i < N_IMG_PAIRS; i++) {
dim3 threadsPerBlock(32,32);
// TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2
CUDA_CHECK(hipMemcpy(gpu_image1, images1+i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(gpu_image2, images2+ i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( image_to_hisogram_simple), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image2, gpu_hist2);
hipLaunchKernelGGL(( image_to_hisogram_simple), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image1, gpu_hist1);
hipLaunchKernelGGL(( histogram_distance), dim3(1), dim3(256), 0, 0, gpu_hist1, gpu_hist2, gpu_hist_distance);
//TODO: copy gpu_hist_distance to cpu_hist_distance
CUDA_CHECK(hipMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), hipMemcpyDeviceToHost));
total_distance += cpu_hist_distance;
}
hipProfilerStop();
CUDA_CHECK(hipFree(gpu_hist1));
CUDA_CHECK(hipFree(gpu_hist2));
CUDA_CHECK(hipFree(gpu_image1));
CUDA_CHECK(hipFree(gpu_image2));
CUDA_CHECK(hipDeviceSynchronize());
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
printf("total time %f [msec]\n", t_finish - t_start);
} while (0);
/* using GPU task-serial + images and histograms in shared memory */
printf("\n=== GPU Task Serial with shared memory ===\n");
do { /* do {} while (0): to keep variables inside this block in their own scope. remove if you prefer otherwise */
/* Your Code Here */
uchar *gpu_image1_shared;
uchar *gpu_image2_shared; // TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_image1_shared,1024*sizeof(uchar)));
CUDA_CHECK(hipMalloc(&gpu_image2_shared,1024*sizeof(uchar)));
int *gpu_hist1;
int *gpu_hist2; // TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_hist1,256*sizeof(int)));
CUDA_CHECK(hipMalloc(&gpu_hist2,256*sizeof(int)));
//hipMemset(&gpu_hist1,0,256*sizeof(int));
//hipMemset(&gpu_hist2,0,256*sizeof(int));
double *gpu_hist_distance; //TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_hist_distance,sizeof(double)));
double cpu_hist_distance;
t_start = get_time_msec();
for (int i = 0; i < N_IMG_PAIRS; i++) {
dim3 threadsPerBlock(32,32);
// TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2
CUDA_CHECK(hipMemcpy(gpu_image1_shared, images1+i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(gpu_image2_shared, images2+i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( image_to_hisogram_shared), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image1_shared, gpu_hist1);
hipLaunchKernelGGL(( image_to_hisogram_shared), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image2_shared, gpu_hist2);
//->move to global hiat
hipLaunchKernelGGL(( histogram_distance), dim3(1), dim3(256), 0, 0, gpu_hist1, gpu_hist2, gpu_hist_distance);
//TODO: copy gpu_hist_distance to cpu_hist_distance
CUDA_CHECK(hipMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), hipMemcpyDeviceToHost));
total_distance += cpu_hist_distance;
}
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipFree(gpu_hist1));
CUDA_CHECK(hipFree(gpu_hist2));
CUDA_CHECK(hipFree(gpu_image1_shared));
CUDA_CHECK(hipFree(gpu_image2_shared));
t_finish = get_time_msec();
} while (0);
printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
printf("total time %f [msec]\n", t_finish - t_start);
/* using GPU + batching */
printf("\n=== GPU Batching ===\n");
do {
//* do {} while (0): to keep variables inside this block in their own scope. remove if you prefer otherwise *//*
//* Your Code Here *//*
uchar *gpu_image1, *gpu_image2; // TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_image1,N_IMG_PAIRS*1024*sizeof(uchar)));
CUDA_CHECK(hipMalloc(&gpu_image2,N_IMG_PAIRS*1024*sizeof(uchar)));
int *gpu_hist1, *gpu_hist2; // TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_hist1,N_IMG_PAIRS*256*sizeof(int)));
CUDA_CHECK(hipMalloc(&gpu_hist2,N_IMG_PAIRS*256*sizeof(int)));
CUDA_CHECK(hipMemset(gpu_hist1,0,N_IMG_PAIRS*256*sizeof(int)));
CUDA_CHECK(hipMemset(gpu_hist2,0,N_IMG_PAIRS*256*sizeof(int)));
double *gpu_hist_distance; //TODO: allocate with hipMalloc
CUDA_CHECK(hipMalloc(&gpu_hist_distance,sizeof(double)));
double cpu_hist_distance;
t_start = get_time_msec();
dim3 threadsPerBlock(32,32);
// TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2
CUDA_CHECK(hipMemcpy(gpu_image1, images1, 1024 *N_IMG_PAIRS* sizeof(uchar), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(gpu_image2, images2, 1024 *N_IMG_PAIRS* sizeof(uchar), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( image_to_hisogram_batched), dim3(N_IMG_PAIRS), dim3(threadsPerBlock), 0, 0, gpu_image2, gpu_hist2);
hipLaunchKernelGGL(( image_to_hisogram_batched), dim3(N_IMG_PAIRS), dim3(threadsPerBlock), 0, 0, gpu_image1, gpu_hist1);
hipLaunchKernelGGL(( histogram_distance_batched), dim3(N_IMG_PAIRS), dim3(256), 0, 0, gpu_hist1, gpu_hist2, gpu_hist_distance);
//TODO: copy gpu_hist_distance to cpu_hist_distance
CUDA_CHECK(hipMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), hipMemcpyDeviceToHost));
total_distance += cpu_hist_distance;
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipFree(gpu_hist1));
CUDA_CHECK(hipFree(gpu_hist2));
CUDA_CHECK(hipFree(gpu_image1));
CUDA_CHECK(hipFree(gpu_image2));
t_finish = get_time_msec();
} while (0);
/* Your Code Here */
printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
printf("total time %f [msec]\n", t_finish - t_start);
return 0;
}
//have a great day :) | da8b78b7d96175f666a0f6f57b3d19f97171dd5e.cu | /* compile with: nvcc -O3 hw1.cu -o hw1 */
#include <stdio.h>
#include <sys/time.h>
#include <cuda_profiler_api.h>
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define IMAGE_SIZE 1024
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
/* Wall-clock time in milliseconds (microsecond resolution, via gettimeofday). */
double static inline get_time_msec(void) {
    struct timeval now;
    gettimeofday(&now, NULL);
    double msec = now.tv_sec * 1e+3;
    msec += now.tv_usec * 1e-3;
    return msec;
}
/* We won't load actual files: fill both image stacks with reproducible
 * pseudo-random bytes. Each buffer must hold
 * N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION uchars. */
void load_image_pairs(uchar *images1, uchar *images2) {
    srand(0);  /* fixed seed so every run (CPU and all GPU paths) sees the same data */
    for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
        /* draws are interleaved (images1 then images2 per pixel) -- keep this
         * order, it determines the exact byte sequence produced by rand() */
        images1[i] = rand() % 256;
        images2[i] = rand() % 256;
    }
}
/* True iff (i, j) is a valid pixel coordinate of an
 * IMG_DIMENSION x IMG_DIMENSION image. Usable on host and device. */
__host__ __device__ bool is_in_image_bounds(int i, int j) {
    bool rowOk = 0 <= i && i < IMG_DIMENSION;
    bool colOk = 0 <= j && j < IMG_DIMENSION;
    return rowOk && colOk;
}
/* 8-bit local binary pattern (LBP) at pixel (i, j): each bit is set when the
 * corresponding 8-neighbour is >= the center pixel. Bit order runs clockwise
 * from the top-left neighbour (bit 7) to the left neighbour (bit 0);
 * neighbours outside the image contribute a 0 bit. */
__host__ __device__ uchar local_binary_pattern(uchar *image, int i, int j) {
    uchar center = image[i * IMG_DIMENSION + j];
    uchar pattern = 0;
    if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
    if (is_in_image_bounds(i - 1, j    )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j    )] >= center) << 6;
    if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
    if (is_in_image_bounds(i    , j + 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
    if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
    if (is_in_image_bounds(i + 1, j    )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j    )] >= center) << 2;
    if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
    if (is_in_image_bounds(i    , j - 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
    return pattern;
}
//__device__ void zero
/* CPU reference path: build the 256-bin LBP histogram of one
 * IMG_DIMENSION x IMG_DIMENSION image. The histogram buffer is zeroed here
 * before accumulation, so it may be reused across calls. */
void image_to_histogram(uchar *image, int *histogram) {
    memset(histogram, 0, sizeof(int) * 256);
    for (int i = 0; i < IMG_DIMENSION; i++) {
        for (int j = 0; j < IMG_DIMENSION; j++) {
            uchar pattern = local_binary_pattern(image, i, j);
            histogram[pattern]++;
        }
    }
}
/* Chi-square distance between two 256-bin histograms:
 * sum over bins of (h1 - h2)^2 / (h1 + h2), skipping empty bin pairs. */
double histogram_distance(int *h1, int *h2) {
    double acc = 0;
    for (int bin = 0; bin < 256; ++bin) {
        int total = h1[bin] + h2[bin];
        if (total != 0) {
            int diff = h1[bin] - h2[bin];
            acc += ((double)(diff * diff)) / total;
        }
    }
    return acc;
}
/* Your __device__ functions and __global__ kernels here */
/* ... */
/* GPU kernel: accumulate the LBP histogram of a single 32x32 image straight
 * into global memory. Launch: <<<1, dim3(32,32)>>>, one thread per pixel.
 * NOTE(review): hist1 is only ever incremented (atomicAdd) -- the caller must
 * zero it before each launch. The task-serial loop in main() memsets the
 * histograms once before the loop only, so counts accumulate across images;
 * confirm that is intended. */
__global__ void image_to_hisogram_simple(uchar *image1, OUT int *hist1) {
    int i = threadIdx.x;
    int j = threadIdx.y;
    uchar pattern = local_binary_pattern(image1, i, j);
    atomicAdd(hist1+pattern,1);
    // __threadfence();
}
/* GPU kernel: chi-square distance between two 256-bin histograms.
 * Launch contract: <<<1, 256>>> -- exactly one block, one thread per bin.
 *
 * Rewritten to fix two defects in the original:
 *   - every thread wrote `*distance = 0` with no barrier before the
 *     accumulation, racing with other threads' additions;
 *   - the sum was accumulated via atomicAdd on a (float*) cast of the double
 *     output, so the 8-byte value copied back by the host was corrupted.
 * A shared-memory tree reduction makes the result deterministic and writes
 * the final double exactly once. */
__global__ void histogram_distance(int *hist1, int *hist2, OUT double *distance) {
    __shared__ double partial[256];
    int i = threadIdx.x;
    double term = 0.0;
    int total = hist1[i] + hist2[i];
    if (total != 0) {
        double diff = (double)(hist1[i] - hist2[i]);
        term = diff * diff / total;
    }
    partial[i] = term;
    __syncthreads();
    /* pairwise tree reduction over the 256 per-bin terms */
    for (int stride = 128; stride > 0; stride >>= 1) {
        if (i < stride) partial[i] += partial[i + stride];
        __syncthreads();
    }
    if (i == 0) *distance = partial[0];   /* single, unambiguous writer */
}
/* GPU kernel: LBP histogram of one 32x32 image, staged through shared memory.
 * Launch contract: <<<1, dim3(32,32)>>>, one thread per pixel.
 *
 * Fixes vs. original: the bare `threadfence()` / `syncthreads()` calls lacked
 * the `__` prefix (would not compile), and a fence is the wrong primitive
 * anyway -- a block-wide barrier (__syncthreads) is required (a) after the
 * image tile and zeroed histogram are staged in shared memory and (b) after
 * all atomicAdds, before the histogram is copied back to global memory. */
__global__ void image_to_hisogram_shared(uchar *image1, OUT int *hist1) {
    int i = threadIdx.x;
    int j = threadIdx.y;
    int flat = i * 32 + j;             /* linear thread id, 0..1023 */
    __shared__ uchar im[IMAGE_SIZE];
    __shared__ int sharedHist[256];
    if (flat < 256) {
        sharedHist[flat] = 0;          /* first 256 threads zero the bins */
    }
    im[flat] = image1[flat];
    __syncthreads();                   /* tile + zeroed histogram visible to all */
    uchar pattern = local_binary_pattern(im, i, j);
    atomicAdd(sharedHist + pattern, 1);
    __syncthreads();                   /* every increment done before write-back */
    if (flat < 256) {
        hist1[flat] = sharedHist[flat];
    }
}
/* GPU kernel: LBP histograms for a whole batch of 32x32 images, one block per
 * image. Launch contract: <<<N_IMG_PAIRS, dim3(32,32)>>>; block k reads image
 * k and writes its 256 bins to hist1[k*256 .. k*256+255].
 *
 * Fixes vs. original: `threadfence()` / `syncthreads()` lacked the `__`
 * prefix (would not compile), and the barrier ordering the atomicAdds was
 * originally placed *after* the shared-histogram read -- too late. Proper
 * __syncthreads() barriers are placed after staging and before write-back. */
__global__ void image_to_hisogram_batched(uchar *images, OUT int *hist1) {
    int i = threadIdx.x;
    int j = threadIdx.y;
    int k = blockIdx.x;                /* image index handled by this block */
    int flat = i * 32 + j;             /* linear thread id, 0..1023 */
    __shared__ uchar im[IMAGE_SIZE];
    __shared__ int sharedHist[256];
    im[flat] = images[k * IMAGE_SIZE + flat];
    if (flat < 256) {
        sharedHist[flat] = 0;          /* first 256 threads zero the bins */
    }
    __syncthreads();                   /* tile + zeroed histogram visible to all */
    uchar pattern = local_binary_pattern(im, i, j);
    atomicAdd(sharedHist + pattern, 1);
    __syncthreads();                   /* every increment done before write-back */
    if (flat < 256) {
        hist1[k * 256 + flat] = sharedHist[flat];
    }
}
/* GPU kernel: chi-square distance summed over a batch of histogram pairs.
 * Launch: <<<N_IMG_PAIRS, 256>>>; block k handles the 256 bins of pair k and
 * all blocks accumulate into the single scalar *distance.
 * NOTE(review): `*distance = 0` is executed by every thread of every block
 * with no grid-wide ordering, so it races with other blocks' atomicAdds and
 * can wipe out already-accumulated terms -- the output should be zeroed by
 * the host before launch instead.
 * NOTE(review): atomicAdd on a (float*) cast of the double output writes a
 * 4-byte float into an 8-byte double; the value the host copies back as a
 * double is garbage. Needs a double accumulator (or per-block reduction). */
__global__ void histogram_distance_batched(int *hist1, int *hist2, OUT double *distance) {
    *distance=0;
    //__threadfence();
    int i = threadIdx.x;
    int k = blockIdx.x;
    if (hist1[256*k+i] + hist2[256*k+i] != 0){
        double temp = (double)((double)SQR(hist1[256*k+i] - hist2[256*k+i])) / (hist1[256*k+i] + hist2[256*k+i]);
        atomicAdd((float*)distance,(float)temp);
    };
}
/* Benchmark driver: compares CPU, GPU task-serial, GPU task-serial with
 * shared memory, and GPU batched implementations of the LBP-histogram
 * chi-square distance over N_IMG_PAIRS image pairs.
 * NOTE(review): total_distance is never initialised to 0 before the first
 * `+=`, and it keeps accumulating across all four sections, so the later
 * "average distance" printouts mix results from every preceding section --
 * confirm whether that is intended. */
int main() {
    uchar *images1; /* we concatenate all images in one huge array */
    uchar *images2;
    /* pinned host allocations -> faster host<->device copies */
    CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
    CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
    load_image_pairs(images1, images2);
    double t_start, t_finish;
    double total_distance;   /* NOTE(review): used uninitialised below */
    /* using CPU */
    printf("\n=== CPU ===\n");
    int histogram1[256];
    int histogram2[256];
    t_start = get_time_msec();
    for (int i = 0; i < N_IMG_PAIRS; i++) {
        image_to_histogram(&images1[i * IMG_DIMENSION * IMG_DIMENSION], histogram1);
        image_to_histogram(&images2[i * IMG_DIMENSION * IMG_DIMENSION], histogram2);
        total_distance += histogram_distance(histogram1, histogram2);
    }
    t_finish = get_time_msec();
    printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
    printf("total time %f [msec]\n", t_finish - t_start);
    /* using GPU task-serial */
    printf("\n=== GPU Task Serial ===\n");
    do {
        //* do {} while (0): to keep variables inside this block in their own scope. remove if you prefer otherwise *//*
        //* Your Code Here *//*
        uchar *gpu_image1, *gpu_image2; // TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_image1,1024*sizeof(uchar)));
        CUDA_CHECK(cudaMalloc(&gpu_image2,1024*sizeof(uchar)));
        int *gpu_hist1, *gpu_hist2; // TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_hist1,256*sizeof(int)));
        CUDA_CHECK(cudaMalloc(&gpu_hist2,256*sizeof(int)));
        /* NOTE(review): histograms are zeroed once here, but the simple kernel
         * only atomicAdds -- counts accumulate across the loop iterations
         * below; the histograms should be re-zeroed per image. */
        CUDA_CHECK(cudaMemset(gpu_hist1,0,256*sizeof(int)));
        CUDA_CHECK(cudaMemset(gpu_hist2,0,256*sizeof(int)));
        double *gpu_hist_distance; //TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_hist_distance,sizeof(double)));
        double cpu_hist_distance;
        t_start = get_time_msec();
        cudaProfilerStart();
        for (int i = 0; i < N_IMG_PAIRS; i++) {
            dim3 threadsPerBlock(32,32);
            // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2
            CUDA_CHECK(cudaMemcpy(gpu_image1, images1+i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemcpy(gpu_image2, images2+ i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice));
            image_to_hisogram_simple<<<1, threadsPerBlock>>>(gpu_image2, gpu_hist2);
            image_to_hisogram_simple<<<1, threadsPerBlock>>>(gpu_image1, gpu_hist1);
            histogram_distance<<<1, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance);
            //TODO: copy gpu_hist_distance to cpu_hist_distance
            /* blocking copy in the default stream also serialises the kernels */
            CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost));
            total_distance += cpu_hist_distance;
        }
        cudaProfilerStop();
        CUDA_CHECK(cudaFree(gpu_hist1));
        CUDA_CHECK(cudaFree(gpu_hist2));
        CUDA_CHECK(cudaFree(gpu_image1));
        CUDA_CHECK(cudaFree(gpu_image2));
        CUDA_CHECK(cudaDeviceSynchronize());
        t_finish = get_time_msec();
        printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
        printf("total time %f [msec]\n", t_finish - t_start);
    } while (0);
    /* using GPU task-serial + images and histograms in shared memory */
    printf("\n=== GPU Task Serial with shared memory ===\n");
    do { /* do {} while (0): to keep variables inside this block in their own scope. remove if you prefer otherwise */
        /* Your Code Here */
        uchar *gpu_image1_shared;
        uchar *gpu_image2_shared; // TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_image1_shared,1024*sizeof(uchar)));
        CUDA_CHECK(cudaMalloc(&gpu_image2_shared,1024*sizeof(uchar)));
        int *gpu_hist1;
        int *gpu_hist2; // TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_hist1,256*sizeof(int)));
        CUDA_CHECK(cudaMalloc(&gpu_hist2,256*sizeof(int)));
        /* no memset needed: the shared-memory kernel overwrites all 256 bins */
        //cudaMemset(&gpu_hist1,0,256*sizeof(int));
        //cudaMemset(&gpu_hist2,0,256*sizeof(int));
        double *gpu_hist_distance; //TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_hist_distance,sizeof(double)));
        double cpu_hist_distance;
        t_start = get_time_msec();
        for (int i = 0; i < N_IMG_PAIRS; i++) {
            dim3 threadsPerBlock(32,32);
            // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2
            CUDA_CHECK(cudaMemcpy(gpu_image1_shared, images1+i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemcpy(gpu_image2_shared, images2+i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice));
            image_to_hisogram_shared<<<1, threadsPerBlock>>>(gpu_image1_shared, gpu_hist1);
            image_to_hisogram_shared<<<1, threadsPerBlock>>>(gpu_image2_shared, gpu_hist2);
            //->move to global hiat
            histogram_distance<<<1, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance);
            //TODO: copy gpu_hist_distance to cpu_hist_distance
            CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost));
            total_distance += cpu_hist_distance;
        }
        CUDA_CHECK(cudaDeviceSynchronize());
        CUDA_CHECK(cudaFree(gpu_hist1));
        CUDA_CHECK(cudaFree(gpu_hist2));
        CUDA_CHECK(cudaFree(gpu_image1_shared));
        CUDA_CHECK(cudaFree(gpu_image2_shared));
        t_finish = get_time_msec();
    } while (0);
    printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
    printf("total time %f [msec]\n", t_finish - t_start);
    /* using GPU + batching */
    printf("\n=== GPU Batching ===\n");
    do {
        //* do {} while (0): to keep variables inside this block in their own scope. remove if you prefer otherwise *//*
        //* Your Code Here *//*
        uchar *gpu_image1, *gpu_image2; // TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_image1,N_IMG_PAIRS*1024*sizeof(uchar)));
        CUDA_CHECK(cudaMalloc(&gpu_image2,N_IMG_PAIRS*1024*sizeof(uchar)));
        int *gpu_hist1, *gpu_hist2; // TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_hist1,N_IMG_PAIRS*256*sizeof(int)));
        CUDA_CHECK(cudaMalloc(&gpu_hist2,N_IMG_PAIRS*256*sizeof(int)));
        CUDA_CHECK(cudaMemset(gpu_hist1,0,N_IMG_PAIRS*256*sizeof(int)));
        CUDA_CHECK(cudaMemset(gpu_hist2,0,N_IMG_PAIRS*256*sizeof(int)));
        /* NOTE(review): a single double receives concurrent writes from all
         * N_IMG_PAIRS blocks of the batched distance kernel -- the value read
         * back below is not reliable; it should also be zeroed here. */
        double *gpu_hist_distance; //TODO: allocate with cudaMalloc
        CUDA_CHECK(cudaMalloc(&gpu_hist_distance,sizeof(double)));
        double cpu_hist_distance;
        t_start = get_time_msec();
        dim3 threadsPerBlock(32,32);
        // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2
        CUDA_CHECK(cudaMemcpy(gpu_image1, images1, 1024 *N_IMG_PAIRS* sizeof(uchar), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(gpu_image2, images2, 1024 *N_IMG_PAIRS* sizeof(uchar), cudaMemcpyHostToDevice));
        image_to_hisogram_batched<<<N_IMG_PAIRS, threadsPerBlock>>>(gpu_image2, gpu_hist2);
        image_to_hisogram_batched<<<N_IMG_PAIRS, threadsPerBlock>>>(gpu_image1, gpu_hist1);
        histogram_distance_batched<<<N_IMG_PAIRS, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance);
        //TODO: copy gpu_hist_distance to cpu_hist_distance
        CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost));
        total_distance += cpu_hist_distance;
        CUDA_CHECK(cudaDeviceSynchronize());
        CUDA_CHECK(cudaFree(gpu_hist1));
        CUDA_CHECK(cudaFree(gpu_hist2));
        CUDA_CHECK(cudaFree(gpu_image1));
        CUDA_CHECK(cudaFree(gpu_image2));
        t_finish = get_time_msec();
    } while (0);
    /* Your Code Here */
    printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
    printf("total time %f [msec]\n", t_finish - t_start);
    return 0;
}
//have a great day :) |
e3d504120054069ca1c4ed28947cce23f8d8bee2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//// KERNEL FUNCTIONS IMPLEMENTATION //////////////////////////////////////////////////////////////
#include "Kernels.h"
//// DEPRESSION FILLING ///////////////////////////////////////////////////////////////////////////
/* One relaxation sweep of epsilon-based depression filling: each interior
 * cell of the water surface wDEM is lowered towards the terrain zDEM as far
 * as its 8 neighbours (+epsilon) allow. Each block produces an
 * O_BLOCK_WIDTH^2 output tile from an I_BLOCK_WIDTH^2 input tile (one-cell
 * halo) staged in shared memory. *pFinished is cleared whenever any cell
 * changed, so the host re-launches until a sweep makes no update.
 * NOTE(review): neighbours are read from ds_wDEM while adjacent threads
 * update it with no barrier inside the k/l loops, so intermediate values are
 * nondeterministic ("chaotic" relaxation) -- presumably only convergence of
 * the outer host loop matters; confirm. */
__global__ void DepressionFillingKernel(
    float* wDEM,
    float* zDEM,
    int width,
    int height,
    float epsilon,
    bool* pFinished)
{
    /* global coordinates of this thread's output cell (+1: border row/col is fixed) */
    int row = blockIdx.y * O_BLOCK_WIDTH + threadIdx.y + 1;
    int col = blockIdx.x * O_BLOCK_WIDTH + threadIdx.x + 1;
    int row_i = row - 1;          /* input-tile (halo) coordinates */
    int col_i = col - 1;
    int row_s = threadIdx.y + 1;  /* position inside the shared tile */
    int col_s = threadIdx.x + 1;
    __shared__ float ds_wDEM[I_BLOCK_WIDTH][I_BLOCK_WIDTH];
    int center;
    float zDEMCenter;
    float wDEMCenter;
    float wDEMNeighbor;
    /* stage the input tile, including the one-cell halo, into shared memory */
    if (row_i < height && col_i < width)
    {
        ds_wDEM[threadIdx.y][threadIdx.x] = wDEM[row_i * width + col_i];
    }
    __syncthreads();
    /* only interior cells of the output tile are updated */
    if (row < height - 1 && col < width - 1 && threadIdx.y < O_BLOCK_WIDTH && threadIdx.x < O_BLOCK_WIDTH)
    {
        center = row * width + col;
        zDEMCenter = zDEM[center];
        wDEMCenter = ds_wDEM[row_s][col_s];
        if (wDEMCenter > zDEMCenter)  /* water still above terrain -> may drop */
        {
            for (int k = row_s - 1; k <= row_s + 1; ++k)
            {
                for (int l = col_s - 1; l <= col_s + 1; ++l)
                {
                    if (k != row_s || l != col_s)  /* 8 neighbours, skip center */
                    {
                        wDEMNeighbor = ds_wDEM[k][l] + epsilon;
                        if (zDEMCenter >= wDEMNeighbor)
                        {
                            /* neighbour allows draining all the way to terrain */
                            wDEMCenter = zDEMCenter;
                            ds_wDEM[row_s][col_s] = zDEMCenter;
                            *pFinished = false;
                        }
                        else
                        {
                            if (wDEMCenter > wDEMNeighbor)
                            {
                                /* lower the water level to neighbour + epsilon */
                                wDEMCenter = wDEMNeighbor;
                                ds_wDEM[row_s][col_s] = wDEMNeighbor;
                                *pFinished = false;
                            }
                        }
                    }
                }
            }
            wDEM[center] = wDEMCenter;
        }
    }
}
/* Host-side launcher for DepressionFillingKernel
 * (default stream, no dynamic shared memory). */
void DepressionFillingKernel_Wrapper(dim3 dimGrid, dim3 dimBlock,
                                     float* wDEM,
                                     float* zDEM,
                                     int width,
                                     int height,
                                     float epsilon,
                                     bool* pFinished)
{
    hipLaunchKernelGGL(DepressionFillingKernel, dimGrid, dimBlock, 0, 0,
                       wDEM, zDEM, width, height, epsilon, pFinished);
}
//// FLOW-TRANSFER-MATRIX FLOW ACCUMULATION ///////////////////////////////////////////////////////
/* One round of iterative multiple-flow-direction accumulation. Direction bit
 * encoding (from the neighbour offsets): 1=N, 2=NE, 4=E, 8=SE, 16=S, 32=SW,
 * 64=W, 128=NW. For each cell, sum the previous round's flow (flowOld) of
 * every neighbour whose bit is set in flowRevDirs (i.e. drains into this
 * cell), weighted by the matching flowFracs entry; add it to the running
 * total flowAcc and store it as this round's flow (flowNew). *pFinished is
 * cleared while any cell still received flow, so the host keeps iterating.
 * flowFracs layout (from the indexing): per row, 8 consecutive width-sized
 * segments -- direction d of cell (row, col) lives at row*width*8 + d*width + col.
 * NOTE(review): neighbour indices are not bounds-checked; assumes flowRevDirs
 * never sets a bit pointing outside the grid on border cells -- confirm. */
__global__ void FlowAccumulationKernelM(
    float* flowOld,
    float* flowNew,
    float* flowAcc,
    float* flowFracs,
    unsigned char* flowRevDirs,
    int width,
    int height,
    bool* pFinished)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float newFlow;
    unsigned char flowRevDir;
    int center;
    int stridedCenter;   /* index into flowFracs, advanced by +width per direction */
    int subCenterWidth;  /* index of the cell one row up */
    int addCenterWidth;  /* index of the cell one row down */
    if (row < height && col < width)
    {
        center = row * width + col;
        stridedCenter = row * width * 8 + col;
        subCenterWidth = center - width;
        addCenterWidth = center + width;
        flowRevDir = flowRevDirs[center];
        newFlow = 0.0f;
        /* gather weighted inflow from each of the 8 upstream neighbours */
        if (flowRevDir & 1) newFlow += flowFracs[stridedCenter] * flowOld[subCenterWidth];
        stridedCenter += width;
        if (flowRevDir & 2) newFlow += flowFracs[stridedCenter] * flowOld[subCenterWidth + 1];
        stridedCenter += width;
        if (flowRevDir & 4) newFlow += flowFracs[stridedCenter] * flowOld[center + 1];
        stridedCenter += width;
        if (flowRevDir & 8) newFlow += flowFracs[stridedCenter] * flowOld[addCenterWidth + 1];
        stridedCenter += width;
        if (flowRevDir & 16) newFlow += flowFracs[stridedCenter] * flowOld[addCenterWidth];
        stridedCenter += width;
        if (flowRevDir & 32) newFlow += flowFracs[stridedCenter] * flowOld[addCenterWidth - 1];
        stridedCenter += width;
        if (flowRevDir & 64) newFlow += flowFracs[stridedCenter] * flowOld[center - 1];
        stridedCenter += width;
        if (flowRevDir & 128) newFlow += flowFracs[stridedCenter] * flowOld[subCenterWidth - 1];
        flowAcc[center] += newFlow;
        flowNew[center] = newFlow;
        if (newFlow > 0.0f)
        {
            *pFinished = false;  /* flow still moving -> another round needed */
        }
    }
}
/* Host-side launcher for FlowAccumulationKernelM
 * (default stream, no dynamic shared memory). */
void FlowAccumulationKernelM_Wrapper(dim3 dimGrid, dim3 dimBlock,
                                     float* flowOld,
                                     float* flowNew,
                                     float* flowAcc,
                                     float* flowFracs,
                                     unsigned char* flowRevDirs,
                                     int width,
                                     int height,
                                     bool* pFinished)
{
    hipLaunchKernelGGL(FlowAccumulationKernelM, dimGrid, dimBlock, 0, 0,
                       flowOld, flowNew, flowAcc, flowFracs, flowRevDirs,
                       width, height, pFinished);
}
//// GRAPH-BASED FLOW ACCUMULATION ////////////////////////////////////////////////////////////////
/* Topological ("graph-based") flow accumulation sweep. inDegreeMatrix holds,
 * per cell, the number of upstream neighbours not yet processed. A cell whose
 * in-degree is 0 at entry: gathers the weighted inflow from its upstream
 * neighbours (flowRevDirs bitmask, weights from flowFracs -- same direction
 * encoding and layout as FlowAccumulationKernelM), adds it to flowAcc, marks
 * itself processed (in-degree -1), and atomically decrements the in-degree of
 * each downstream neighbour (flowDirs bitmask). *pFinished is cleared when
 * any cell was processed, so the host re-launches until a sweep does nothing.
 * NOTE(review): there is no __threadfence() between the flowAcc update and
 * the in-degree decrements -- a cell whose in-degree drops to 0 during this
 * same launch could observe a stale flowAcc of its upstream neighbour under
 * the GPU's weak memory model; confirm cells are only consumed in later
 * launches. */
__global__ void FlowAccumulationKernelG(
    float* flowAcc,
    float* flowFracs,
    unsigned char* flowDirs,
    unsigned char* flowRevDirs,
    int* inDegreeMatrix,
    int width,
    int height,
    bool* pFinished)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int center;
    int stridedCenter;   /* index into flowFracs, advanced by +width per direction */
    int subCenterWidth;  /* index of the cell one row up */
    int addCenterWidth;  /* index of the cell one row down */
    float newFlow;
    unsigned char dir;
    if (row < height && col < width)
    {
        center = row * width + col;
        if (inDegreeMatrix[center] == 0)  /* all upstream contributions received */
        {
            inDegreeMatrix[center] = -1;  /* mark as processed */
            *pFinished = false;
            subCenterWidth = center - width;
            addCenterWidth = center + width;
            stridedCenter = row * width * 8 + col;
            newFlow = 0.0f;
            /* gather weighted inflow from the 8 upstream neighbours */
            dir = flowRevDirs[center];
            if (dir & 1) newFlow += flowFracs[stridedCenter] * flowAcc[subCenterWidth];
            stridedCenter += width;
            if (dir & 2) newFlow += flowFracs[stridedCenter] * flowAcc[subCenterWidth + 1];
            stridedCenter += width;
            if (dir & 4) newFlow += flowFracs[stridedCenter] * flowAcc[center + 1];
            stridedCenter += width;
            if (dir & 8) newFlow += flowFracs[stridedCenter] * flowAcc[addCenterWidth + 1];
            stridedCenter += width;
            if (dir & 16) newFlow += flowFracs[stridedCenter] * flowAcc[addCenterWidth];
            stridedCenter += width;
            if (dir & 32) newFlow += flowFracs[stridedCenter] * flowAcc[addCenterWidth - 1];
            stridedCenter += width;
            if (dir & 64) newFlow += flowFracs[stridedCenter] * flowAcc[center - 1];
            stridedCenter += width;
            if (dir & 128) newFlow += flowFracs[stridedCenter] * flowAcc[subCenterWidth - 1];
            flowAcc[center] += newFlow;
            /* release the downstream neighbours */
            dir = flowDirs[center];
            if (dir & 1) atomicSub(&inDegreeMatrix[subCenterWidth], 1);
            if (dir & 2) atomicSub(&inDegreeMatrix[subCenterWidth + 1], 1);
            if (dir & 4) atomicSub(&inDegreeMatrix[center + 1], 1);
            if (dir & 8) atomicSub(&inDegreeMatrix[addCenterWidth + 1], 1);
            if (dir & 16) atomicSub(&inDegreeMatrix[addCenterWidth], 1);
            if (dir & 32) atomicSub(&inDegreeMatrix[addCenterWidth - 1], 1);
            if (dir & 64) atomicSub(&inDegreeMatrix[center - 1], 1);
            if (dir & 128) atomicSub(&inDegreeMatrix[subCenterWidth - 1], 1);
        }
    }
}
/* Host-side launcher for FlowAccumulationKernelG
 * (default stream, no dynamic shared memory). */
void FlowAccumulationKernelG_Wrapper(dim3 dimGrid, dim3 dimBlock,
                                     float* flowAcc,
                                     float* flowFracs,
                                     unsigned char* flowDirs,
                                     unsigned char* flowRevDirs,
                                     int* inDegreeMatrix,
                                     int width,
                                     int height,
                                     bool* pFinished)
{
    hipLaunchKernelGGL(FlowAccumulationKernelG, dimGrid, dimBlock, 0, 0,
                       flowAcc, flowFracs, flowDirs, flowRevDirs,
                       inDegreeMatrix, width, height, pFinished);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| e3d504120054069ca1c4ed28947cce23f8d8bee2.cu |
//// KERNEL FUNCTIONS IMPLEMENTATION //////////////////////////////////////////////////////////////
#include "Kernels.h"
//// DEPRESSION FILLING ///////////////////////////////////////////////////////////////////////////
/* One relaxation sweep of epsilon-based depression filling: each interior
 * cell of the water surface wDEM is lowered towards the terrain zDEM as far
 * as its 8 neighbours (+epsilon) allow. Each block produces an
 * O_BLOCK_WIDTH^2 output tile from an I_BLOCK_WIDTH^2 input tile (one-cell
 * halo) staged in shared memory. *pFinished is cleared whenever any cell
 * changed, so the host re-launches until a sweep makes no update.
 * NOTE(review): neighbours are read from ds_wDEM while adjacent threads
 * update it with no barrier inside the k/l loops, so intermediate values are
 * nondeterministic ("chaotic" relaxation) -- presumably only convergence of
 * the outer host loop matters; confirm. */
__global__ void DepressionFillingKernel(
    float* wDEM,
    float* zDEM,
    int width,
    int height,
    float epsilon,
    bool* pFinished)
{
    /* global coordinates of this thread's output cell (+1: border row/col is fixed) */
    int row = blockIdx.y * O_BLOCK_WIDTH + threadIdx.y + 1;
    int col = blockIdx.x * O_BLOCK_WIDTH + threadIdx.x + 1;
    int row_i = row - 1;          /* input-tile (halo) coordinates */
    int col_i = col - 1;
    int row_s = threadIdx.y + 1;  /* position inside the shared tile */
    int col_s = threadIdx.x + 1;
    __shared__ float ds_wDEM[I_BLOCK_WIDTH][I_BLOCK_WIDTH];
    int center;
    float zDEMCenter;
    float wDEMCenter;
    float wDEMNeighbor;
    /* stage the input tile, including the one-cell halo, into shared memory */
    if (row_i < height && col_i < width)
    {
        ds_wDEM[threadIdx.y][threadIdx.x] = wDEM[row_i * width + col_i];
    }
    __syncthreads();
    /* only interior cells of the output tile are updated */
    if (row < height - 1 && col < width - 1 && threadIdx.y < O_BLOCK_WIDTH && threadIdx.x < O_BLOCK_WIDTH)
    {
        center = row * width + col;
        zDEMCenter = zDEM[center];
        wDEMCenter = ds_wDEM[row_s][col_s];
        if (wDEMCenter > zDEMCenter)  /* water still above terrain -> may drop */
        {
            for (int k = row_s - 1; k <= row_s + 1; ++k)
            {
                for (int l = col_s - 1; l <= col_s + 1; ++l)
                {
                    if (k != row_s || l != col_s)  /* 8 neighbours, skip center */
                    {
                        wDEMNeighbor = ds_wDEM[k][l] + epsilon;
                        if (zDEMCenter >= wDEMNeighbor)
                        {
                            /* neighbour allows draining all the way to terrain */
                            wDEMCenter = zDEMCenter;
                            ds_wDEM[row_s][col_s] = zDEMCenter;
                            *pFinished = false;
                        }
                        else
                        {
                            if (wDEMCenter > wDEMNeighbor)
                            {
                                /* lower the water level to neighbour + epsilon */
                                wDEMCenter = wDEMNeighbor;
                                ds_wDEM[row_s][col_s] = wDEMNeighbor;
                                *pFinished = false;
                            }
                        }
                    }
                }
            }
            wDEM[center] = wDEMCenter;
        }
    }
}
/* Host-side launcher for DepressionFillingKernel
 * (default stream, no dynamic shared memory). */
void DepressionFillingKernel_Wrapper(dim3 dimGrid, dim3 dimBlock,
                                     float* wDEM,
                                     float* zDEM,
                                     int width,
                                     int height,
                                     float epsilon,
                                     bool* pFinished)
{
    DepressionFillingKernel<<<dimGrid, dimBlock>>>(wDEM, zDEM, width, height,
                                                   epsilon, pFinished);
}
//// FLOW-TRANSFER-MATRIX FLOW ACCUMULATION ///////////////////////////////////////////////////////
/* One round of iterative multiple-flow-direction accumulation. Direction bit
 * encoding (from the neighbour offsets): 1=N, 2=NE, 4=E, 8=SE, 16=S, 32=SW,
 * 64=W, 128=NW. For each cell, sum the previous round's flow (flowOld) of
 * every neighbour whose bit is set in flowRevDirs (i.e. drains into this
 * cell), weighted by the matching flowFracs entry; add it to the running
 * total flowAcc and store it as this round's flow (flowNew). *pFinished is
 * cleared while any cell still received flow, so the host keeps iterating.
 * flowFracs layout (from the indexing): per row, 8 consecutive width-sized
 * segments -- direction d of cell (row, col) lives at row*width*8 + d*width + col.
 * NOTE(review): neighbour indices are not bounds-checked; assumes flowRevDirs
 * never sets a bit pointing outside the grid on border cells -- confirm. */
__global__ void FlowAccumulationKernelM(
    float* flowOld,
    float* flowNew,
    float* flowAcc,
    float* flowFracs,
    unsigned char* flowRevDirs,
    int width,
    int height,
    bool* pFinished)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float newFlow;
    unsigned char flowRevDir;
    int center;
    int stridedCenter;   /* index into flowFracs, advanced by +width per direction */
    int subCenterWidth;  /* index of the cell one row up */
    int addCenterWidth;  /* index of the cell one row down */
    if (row < height && col < width)
    {
        center = row * width + col;
        stridedCenter = row * width * 8 + col;
        subCenterWidth = center - width;
        addCenterWidth = center + width;
        flowRevDir = flowRevDirs[center];
        newFlow = 0.0f;
        /* gather weighted inflow from each of the 8 upstream neighbours */
        if (flowRevDir & 1) newFlow += flowFracs[stridedCenter] * flowOld[subCenterWidth];
        stridedCenter += width;
        if (flowRevDir & 2) newFlow += flowFracs[stridedCenter] * flowOld[subCenterWidth + 1];
        stridedCenter += width;
        if (flowRevDir & 4) newFlow += flowFracs[stridedCenter] * flowOld[center + 1];
        stridedCenter += width;
        if (flowRevDir & 8) newFlow += flowFracs[stridedCenter] * flowOld[addCenterWidth + 1];
        stridedCenter += width;
        if (flowRevDir & 16) newFlow += flowFracs[stridedCenter] * flowOld[addCenterWidth];
        stridedCenter += width;
        if (flowRevDir & 32) newFlow += flowFracs[stridedCenter] * flowOld[addCenterWidth - 1];
        stridedCenter += width;
        if (flowRevDir & 64) newFlow += flowFracs[stridedCenter] * flowOld[center - 1];
        stridedCenter += width;
        if (flowRevDir & 128) newFlow += flowFracs[stridedCenter] * flowOld[subCenterWidth - 1];
        flowAcc[center] += newFlow;
        flowNew[center] = newFlow;
        if (newFlow > 0.0f)
        {
            *pFinished = false;  /* flow still moving -> another round needed */
        }
    }
}
/* Host-side launcher for FlowAccumulationKernelM
 * (default stream, no dynamic shared memory). */
void FlowAccumulationKernelM_Wrapper(dim3 dimGrid, dim3 dimBlock,
                                     float* flowOld,
                                     float* flowNew,
                                     float* flowAcc,
                                     float* flowFracs,
                                     unsigned char* flowRevDirs,
                                     int width,
                                     int height,
                                     bool* pFinished)
{
    FlowAccumulationKernelM<<<dimGrid, dimBlock>>>(flowOld, flowNew, flowAcc,
                                                   flowFracs, flowRevDirs,
                                                   width, height, pFinished);
}
//// GRAPH-BASED FLOW ACCUMULATION ////////////////////////////////////////////////////////////////
/* Topological ("graph-based") flow accumulation sweep. inDegreeMatrix holds,
 * per cell, the number of upstream neighbours not yet processed. A cell whose
 * in-degree is 0 at entry: gathers the weighted inflow from its upstream
 * neighbours (flowRevDirs bitmask, weights from flowFracs -- same direction
 * encoding and layout as FlowAccumulationKernelM), adds it to flowAcc, marks
 * itself processed (in-degree -1), and atomically decrements the in-degree of
 * each downstream neighbour (flowDirs bitmask). *pFinished is cleared when
 * any cell was processed, so the host re-launches until a sweep does nothing.
 * NOTE(review): there is no __threadfence() between the flowAcc update and
 * the in-degree decrements -- a cell whose in-degree drops to 0 during this
 * same launch could observe a stale flowAcc of its upstream neighbour under
 * the GPU's weak memory model; confirm cells are only consumed in later
 * launches. */
__global__ void FlowAccumulationKernelG(
    float* flowAcc,
    float* flowFracs,
    unsigned char* flowDirs,
    unsigned char* flowRevDirs,
    int* inDegreeMatrix,
    int width,
    int height,
    bool* pFinished)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int center;
    int stridedCenter;   /* index into flowFracs, advanced by +width per direction */
    int subCenterWidth;  /* index of the cell one row up */
    int addCenterWidth;  /* index of the cell one row down */
    float newFlow;
    unsigned char dir;
    if (row < height && col < width)
    {
        center = row * width + col;
        if (inDegreeMatrix[center] == 0)  /* all upstream contributions received */
        {
            inDegreeMatrix[center] = -1;  /* mark as processed */
            *pFinished = false;
            subCenterWidth = center - width;
            addCenterWidth = center + width;
            stridedCenter = row * width * 8 + col;
            newFlow = 0.0f;
            /* gather weighted inflow from the 8 upstream neighbours */
            dir = flowRevDirs[center];
            if (dir & 1) newFlow += flowFracs[stridedCenter] * flowAcc[subCenterWidth];
            stridedCenter += width;
            if (dir & 2) newFlow += flowFracs[stridedCenter] * flowAcc[subCenterWidth + 1];
            stridedCenter += width;
            if (dir & 4) newFlow += flowFracs[stridedCenter] * flowAcc[center + 1];
            stridedCenter += width;
            if (dir & 8) newFlow += flowFracs[stridedCenter] * flowAcc[addCenterWidth + 1];
            stridedCenter += width;
            if (dir & 16) newFlow += flowFracs[stridedCenter] * flowAcc[addCenterWidth];
            stridedCenter += width;
            if (dir & 32) newFlow += flowFracs[stridedCenter] * flowAcc[addCenterWidth - 1];
            stridedCenter += width;
            if (dir & 64) newFlow += flowFracs[stridedCenter] * flowAcc[center - 1];
            stridedCenter += width;
            if (dir & 128) newFlow += flowFracs[stridedCenter] * flowAcc[subCenterWidth - 1];
            flowAcc[center] += newFlow;
            /* release the downstream neighbours */
            dir = flowDirs[center];
            if (dir & 1) atomicSub(&inDegreeMatrix[subCenterWidth], 1);
            if (dir & 2) atomicSub(&inDegreeMatrix[subCenterWidth + 1], 1);
            if (dir & 4) atomicSub(&inDegreeMatrix[center + 1], 1);
            if (dir & 8) atomicSub(&inDegreeMatrix[addCenterWidth + 1], 1);
            if (dir & 16) atomicSub(&inDegreeMatrix[addCenterWidth], 1);
            if (dir & 32) atomicSub(&inDegreeMatrix[addCenterWidth - 1], 1);
            if (dir & 64) atomicSub(&inDegreeMatrix[center - 1], 1);
            if (dir & 128) atomicSub(&inDegreeMatrix[subCenterWidth - 1], 1);
        }
    }
}
/* Host-side launcher for FlowAccumulationKernelG
 * (default stream, no dynamic shared memory). */
void FlowAccumulationKernelG_Wrapper(dim3 dimGrid, dim3 dimBlock,
                                     float* flowAcc,
                                     float* flowFracs,
                                     unsigned char* flowDirs,
                                     unsigned char* flowRevDirs,
                                     int* inDegreeMatrix,
                                     int width,
                                     int height,
                                     bool* pFinished)
{
    FlowAccumulationKernelG<<<dimGrid, dimBlock>>>(flowAcc, flowFracs,
                                                   flowDirs, flowRevDirs,
                                                   inDegreeMatrix, width,
                                                   height, pFinished);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
e7b4b5d1f8e494a0a8bbb248a1ae78eae60e4f85.hip | // !!! This is a file automatically generated by hipify!!!
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2016 Maximilian Knespel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "testVectorReduce.hpp"
#include <iostream>
#include <iomanip>
#include <cassert>
#include <cstdlib> // srand, rand
#include <cstdint> // uint32_t, uint64_t
#include <chrono>
#include <limits>
#include <vector>
#include <cmath>
#include <cfloat> // FLT_MAX
#include <bitset>
#include <hip/hip_runtime.h>
#include <hipfft.h> // hipfftComplex
#ifdef USE_FFTW
# include <fftw3.h>
# include "libs/hybridInputOutput.hpp"
#endif
#include "algorithms/vectorReduce.hpp"
#include "algorithms/cuda/cudaVectorReduce.hpp"
#include "benchmark/imresh/algorithms/cuda/cudaVectorReduce.hpp"
#include "libs/cudacommon.hpp"
#include "benchmarkHelper.hpp"
namespace imresh
{
namespace algorithms
{
unsigned int constexpr nRepetitions = 20;
/* Approximate floating-point comparison: returns true when the relative
 * error between a and b is within marginFactor machine epsilons of T_PREC;
 * prints the offending file/line otherwise. Two zeros compare equal.
 * NOTE(review): `::max` relies on the global-namespace max() pulled in by
 * the HIP/CUDA math headers -- confirm it resolves on all host compilers. */
template<class T_PREC>
bool compareFloat( const char * file, int line, T_PREC a, T_PREC b, T_PREC marginFactor = 1.0 )
{
    auto const max = ::max( std::abs(a), std::abs(b) );
    if ( max == 0 )
        return true; // both are 0 and therefore equal
    auto const relErr = fabs( a - b ) / max;
    auto const maxRelErr = marginFactor * std::numeric_limits<T_PREC>::epsilon();
    if ( not ( relErr <= maxRelErr ) )
        printf( "[%s:%i] relErr: %f > %f :maxRelErr!\n", file, line, relErr, maxRelErr );
    return relErr <= maxRelErr;
}
void testVectorReduce( void )
{
using namespace std::chrono;
using namespace benchmark::imresh::algorithms::cuda;
using namespace imresh::algorithms::cuda;
using namespace imresh::algorithms;
using namespace imresh::libs;
const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel
auto pData = new float[nMaxElements];
srand(350471643);
for ( unsigned i = 0; i < nMaxElements; ++i )
pData[i] = ( (float) rand() / RAND_MAX ) - 0.5f;
float * dpData;
mallocCudaArray( &dpData, nMaxElements );
CUDA_ERROR( hipMemcpy( dpData, pData, nMaxElements*sizeof(dpData[0]), hipMemcpyHostToDevice ) );
/* Test for array of length 1 */
assert( vectorMin( pData, 1 ) == pData[0] );
assert( vectorMax( pData, 1 ) == pData[0] );
assert( vectorSum( pData, 1 ) == pData[0] );
assert( cudaVectorMin( dpData, 1 ) == pData[0] );
assert( cudaVectorMax( dpData, 1 ) == pData[0] );
assert( cudaVectorSum( dpData, 1 ) == pData[0] );
/* do some checks with longer arrays and obvious results */
float obviousMaximum = 7.37519;
float obviousMinimum =-7.37519;
/* in order to filter out page time outs or similarily long random wait
* times, we repeat the measurement nRepetitions times and choose the
* shortest duration measured */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
using clock = std::chrono::high_resolution_clock;
std::cout << "vector length : cudaVectorMax (global atomic) | cudaVectorMax (global atomic) | cudaVectorMax (shared memory) | cudaVectorMax (shared memory+warp reduce) | cudaVectorMax (__shfl_down) | vectorMax | cudaVectorMin (__shfl_down) | vectorMin\n";
using namespace imresh::tests;
for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) )
{
std::cout << std::setw(8) << nElements << " : ";
float milliseconds, minTime;
decltype( clock::now() ) clock0, clock1;
int iObviousValuePos = rand() % nElements;
// std::cout << "iObviousValuePos = " << iObviousValuePos << "\n";
// std::cout << "nElements = " << nElements << "\n";
/* Maximum */
pData[iObviousValuePos] = obviousMaximum;
CUDA_ERROR( hipMemcpy( dpData, pData, nElements*sizeof(dpData[0]), hipMemcpyHostToDevice ) );
#define TIME_GPU( FUNC, OBVIOUS_VALUE ) \
{ \
minTime = FLT_MAX; \
for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \
++iRepetition ) \
{ \
hipEventRecord( start ); \
auto cudaReduced = FUNC( dpData, nElements ); \
hipEventRecord( stop ); \
hipEventSynchronize( stop ); \
hipEventElapsedTime( &milliseconds, start, stop ); \
minTime = fmin( minTime, milliseconds ); \
assert( cudaReduced == OBVIOUS_VALUE ); \
} \
std::cout << std::setw(8) << minTime << " |" << std::flush; \
}
TIME_GPU( cudaVectorMaxGlobalAtomic2 , obviousMaximum )
TIME_GPU( cudaVectorMaxGlobalAtomic , obviousMaximum )
TIME_GPU( cudaVectorMaxSharedMemory , obviousMaximum )
TIME_GPU( cudaVectorMaxSharedMemoryWarps, obviousMaximum )
TIME_GPU( cudaVectorMax , obviousMaximum )
/* time CPU */
#define TIME_CPU( FUNC, OBVIOUS_VALUE ) \
{ \
minTime = FLT_MAX; \
for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \
++iRepetition ) \
{ \
clock0 = clock::now(); \
auto cpuMax = FUNC( pData, nElements ); \
clock1 = clock::now(); \
auto seconds = duration_cast<duration<double>>( \
clock1 - clock0 ); \
minTime = fmin( minTime, seconds.count() * 1000 ); \
assert( cpuMax == OBVIOUS_VALUE ); \
} \
std::cout << std::setw(8) << minTime << " |" << std::flush; \
}
TIME_CPU( vectorMax, obviousMaximum )
/* Minimum */
pData[iObviousValuePos] = obviousMinimum;
CUDA_ERROR( hipMemcpy( dpData, pData, nElements*sizeof(dpData[0]), hipMemcpyHostToDevice ) );
TIME_GPU( cudaVectorMin, obviousMinimum )
TIME_CPU( vectorMin, obviousMinimum )
/* set obvious value back to random value */
pData[iObviousValuePos] = (float) rand() / RAND_MAX;
std::cout << "\n";
#undef TIME_GPU
#undef TIME_CPU
}
//for ( unsigned nElements = 2; nElements
CUDA_ERROR( hipFree( dpData ) );
delete[] pData;
}
template<class T_MASK, class T_PACKED>
__attribute__(( optimize("unroll-loops") ))
void unpackBitMask
(
T_MASK * const __restrict__ rMask,
T_PACKED const * const __restrict__ rPackedBits,
unsigned int const nElements
)
{
auto const nElem = rMask + nElements;
auto constexpr nBits = sizeof( T_PACKED ) * 8u;
auto iPacked = rPackedBits;
for ( auto iElem = rMask; iElem < nElem; ++iPacked )
{
auto bitMask = T_PACKED(0x01) << ( nBits-1 );
for ( auto iBit = 0u; iBit < nBits; ++iBit, ++iElem )
{
if ( iElem >= nElem )
break;
assert( bitMask != T_MASK(0) );
assert( iElem < rMask + nElements );
assert( iPacked < rPackedBits + ceilDiv( nElements, nBits ) );
*iElem = T_MASK( (*iPacked & bitMask) != 0 );
bitMask >>= 1;
}
}
}
void testUnpackBitMask( void )
{
uint32_t packed = 0x33333333;
constexpr auto nElements = 8 * sizeof( packed );
bool unpacked[ nElements ];
unpacked[ nElements-2 ] = 1;
unpacked[ nElements-1 ] = 0;
unpackBitMask( unpacked, &packed, nElements-2 );
for ( auto i = 0u; i < (nElements-2)/2; ++i )
{
assert( unpacked[2*i+0] == i % 2 );
assert( unpacked[2*i+1] == i % 2 );
}
assert( unpacked[ nElements-2 ] == 1 );
assert( unpacked[ nElements-1 ] == 0 );
}
void testCalculateHioError( void )
{
using namespace std::chrono;
using namespace benchmark::imresh::algorithms::cuda; // cudaCalculateHioErrorBitPacked
using namespace imresh::algorithms::cuda; // cudaKernelCalculateHioError
using namespace imresh::libs; // calculateHioError, mallocCudaArray
using namespace imresh::tests; // getLogSpacedSamplingPoints
const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel
/* allocate */
hipfftComplex * dpData, * pData;
unsigned char * dpIsMaskedChar, * pIsMaskedChar;
float * dpIsMasked , * pIsMasked;
unsigned * dpBitMasked , * pBitMasked;
auto const nBitMaskedElements = ceilDiv( nMaxElements, 8 * sizeof( dpBitMasked[0] ) );
mallocCudaArray( &dpIsMaskedChar, nMaxElements );
mallocCudaArray( &dpData , nMaxElements );
mallocCudaArray( &dpIsMasked , nMaxElements );
mallocCudaArray( &dpBitMasked , nBitMaskedElements );
pData = new hipfftComplex [ nMaxElements ];
pIsMaskedChar = new unsigned char[ nMaxElements ];
pIsMasked = new float [ nMaxElements ];
pBitMasked = new unsigned[ nBitMaskedElements ];
/* allocate result buffer for reduced values of calculateHioError
* kernel call */
float nMaskedPixels, * dpnMaskedPixels;
float totalError , * dpTotalError;
mallocCudaArray( &dpnMaskedPixels, 1 );
mallocCudaArray( &dpTotalError , 1 );
/* initialize mask randomly */
assert( sizeof(int) == 4 );
srand(350471643);
for ( auto i = 0u; i < nBitMaskedElements; ++i )
pBitMasked[i] = rand() % UINT_MAX;
unpackBitMask( pIsMasked, pBitMasked, nMaxElements );
for ( auto i = 0u; i < nMaxElements; ++i )
{
pIsMaskedChar[i] = pIsMasked[i];
assert( pIsMaskedChar[i] == 0 or pIsMaskedChar[i] == 1 );
}
std::cout << "[unpacked] ";
for ( int i = 0; i < 32; ++i )
std::cout << pIsMasked[i];
std::cout << "\n";
std::cout << "[ packed] " << std::bitset<32>( pBitMasked[0] ) << "\n";
/* initialize data with Pythagorean triple 3*3 + 4*4 = 5*5 for masked bits */
for ( auto i = 0u; i < nMaxElements; ++i )
{
if ( pIsMasked[i] )
{
pData[i].x = 3.0f;
pData[i].y = 4.0f;
}
else
{
pData[i].x = (float) rand() / RAND_MAX;
pData[i].y = (float) rand() / RAND_MAX;
}
}
/* if calculateHioError works correctly then we simply get
* #masked * 5 as the mean complex norm error */
/* push to GPU */
CUDA_ERROR( hipMemcpy( dpData , pData , nMaxElements * sizeof( pData [0] ), hipMemcpyHostToDevice ) );
CUDA_ERROR( hipMemcpy( dpIsMasked , pIsMasked , nMaxElements * sizeof( pIsMasked[0] ), hipMemcpyHostToDevice ) );
CUDA_ERROR( hipMemcpy( dpBitMasked, pBitMasked, nBitMaskedElements * sizeof( pBitMasked[0] ), hipMemcpyHostToDevice ) );
CUDA_ERROR( hipMemcpy( dpIsMaskedChar, pIsMaskedChar, nMaxElements * sizeof( pIsMaskedChar[0] ), hipMemcpyHostToDevice ) );
std::cout << "test with randomly masked pythagorean triples";
/* because the number of elements we include only increases the number
* of found masked elements should also only increase. */
float nLastMaskedPixels = 0;
for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) )
{
std::cout << "." << std::flush;
CUDA_ERROR( hipMemset( dpnMaskedPixels, 0, sizeof(float) ) );
CUDA_ERROR( hipMemset( dpTotalError , 0, sizeof(float) ) );
hipLaunchKernelGGL(( cudaKernelCalculateHioError), dim3(3),dim3(256), 0, 0,
dpData, dpIsMasked, nElements, false /* don't invert mask */,
dpTotalError, dpnMaskedPixels );
CUDA_ERROR( hipMemcpy( &nMaskedPixels, dpnMaskedPixels,
sizeof(float), hipMemcpyDeviceToHost) );
CUDA_ERROR( hipMemcpy( &totalError, dpTotalError,
sizeof(float), hipMemcpyDeviceToHost) );
/* Calculation done, now check if everything is correct */
if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2
{
assert( nLastMaskedPixels <= nMaskedPixels );
assert( (unsigned) totalError % 5 == 0 );
assert( nMaskedPixels * 5 == totalError );
}
nLastMaskedPixels = nMaskedPixels;
/* check char version */
CUDA_ERROR( hipMemset( dpnMaskedPixels, 0, sizeof(float) ) );
CUDA_ERROR( hipMemset( dpTotalError , 0, sizeof(float) ) );
hipLaunchKernelGGL(( cudaKernelCalculateHioError), dim3(3),dim3(256), 0, 0,
dpData, dpIsMaskedChar, nElements, false /* don't invert mask */,
dpTotalError, dpnMaskedPixels );
CUDA_ERROR( hipMemcpy( &nMaskedPixels, dpnMaskedPixels,
sizeof(float), hipMemcpyDeviceToHost) );
CUDA_ERROR( hipMemcpy( &totalError, dpTotalError,
sizeof(float), hipMemcpyDeviceToHost) );
/* Calculation done, now check if everything is correct */
if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2
{
assert( nLastMaskedPixels == nMaskedPixels );
assert( (unsigned) totalError % 5 == 0 );
assert( nMaskedPixels * 5 == totalError );
}
/* check packed bit version */
CUDA_ERROR( hipMemset( dpnMaskedPixels, 0, sizeof(float) ) );
CUDA_ERROR( hipMemset( dpTotalError , 0, sizeof(float) ) );
hipLaunchKernelGGL(( cudaKernelCalculateHioErrorBitPacked), dim3(1),dim3(32), 0, 0,
dpData, dpBitMasked, nElements, dpTotalError, dpnMaskedPixels );
CUDA_ERROR( hipMemcpy( &nMaskedPixels, dpnMaskedPixels,
sizeof(float), hipMemcpyDeviceToHost) );
CUDA_ERROR( hipMemcpy( &totalError, dpTotalError,
sizeof(float), hipMemcpyDeviceToHost) );
/* Calculation done, now check if everything is correct */
if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2
{
if ( not ( nLastMaskedPixels == nMaskedPixels ) )
{
printf( "nLastMaskedPixels: %f, nMaskedPixels: %f, totalError: %f\n", nLastMaskedPixels, nMaskedPixels, totalError );
assert( nLastMaskedPixels == nMaskedPixels );
}
if ( not ( (unsigned) totalError % 5 == 0 ) )
{
printf( "totalError: %f, nMaskedPixels: %f\n", totalError, nMaskedPixels );
assert( (unsigned) totalError % 5 == 0 );
}
assert( nMaskedPixels * 5 == totalError );
}
else
{
/* no use continuing this loop if we can't assert anything */
break;
}
#ifdef USE_FFTW
static_assert( sizeof( hipfftComplex ) == sizeof( fftwf_complex ), "" );
/* now compare with CPU version which should give the exact same
* result, as there should be no floating point rounding errors
* for relatively short array ( < 1e6 ? ) */
float nMaskedPixelsCpu, totalErrorCpu;
calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements, /* is inverted: */ false, &totalErrorCpu, &nMaskedPixelsCpu );
/* when rounding errors occur the order becomes important */
if ( totalError < 16777216 )
{
assert( compareFloat( __FILE__, __LINE__, totalError, totalErrorCpu, sqrtf(nElements) ) );
assert( nMaskedPixelsCpu == nMaskedPixels );
}
#endif
}
std::cout << "OK\n";
/* benchmark with random numbers */
for ( auto i = 0u; i < nBitMaskedElements; ++i )
{
pData[i].x = (float) rand() / RAND_MAX;
pData[i].y = (float) rand() / RAND_MAX;
}
CUDA_ERROR( hipMemcpy( dpData, pData, nMaxElements * sizeof( pData[0] ), hipMemcpyHostToDevice ) );
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
using clock = std::chrono::high_resolution_clock;
std::cout << "time in milliseconds:\n";
std::cout << "vector length : cudaCalcHioError(uint32_t) | cudaCalcHioError(char) | cudaCalcHioError(packed) | calcHioError (CPU) |\n";
for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) )
{
std::cout << std::setw(8) << nElements << " : ";
float milliseconds, minTime;
decltype( clock::now() ) clock0, clock1;
float error;
#define TIME_GPU( FUNC, MASK ) \
minTime = FLT_MAX; \
for ( auto iRepetition = 0u; iRepetition < nRepetitions; \
++iRepetition ) \
{ \
hipEventRecord( start ); \
error = FUNC( dpData, MASK, nElements ); \
hipEventRecord( stop ); \
hipEventSynchronize( stop ); \
hipEventElapsedTime( &milliseconds, start, stop ); \
minTime = fmin( minTime, milliseconds ); \
assert( error <= nElements ); \
} \
std::cout << std::setw(8) << minTime << " |" << std::flush;
TIME_GPU( cudaCalculateHioError, dpIsMasked )
auto unpackedError = error;
TIME_GPU( cudaCalculateHioError, dpIsMaskedChar ) // sets error
compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) );
TIME_GPU( cudaCalculateHioErrorBitPacked, dpBitMasked ) // sets error
compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) );
#ifdef USE_FFTW
/* time CPU */
minTime = FLT_MAX;
for ( auto iRepetition = 0u; iRepetition < nRepetitions;
++iRepetition )
{
clock0 = clock::now();
auto error = calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements );
clock1 = clock::now();
auto seconds = duration_cast<duration<double>>( clock1 - clock0 );
minTime = fmin( minTime, seconds.count() * 1000 );
assert( error <= nElements );
}
#endif
std::cout << std::setw(8) << minTime << "\n" << std::flush;
}
/* free */
CUDA_ERROR( hipFree( dpnMaskedPixels ) );
CUDA_ERROR( hipFree( dpTotalError ) );
CUDA_ERROR( hipFree( dpData ) );
CUDA_ERROR( hipFree( dpIsMasked ) );
CUDA_ERROR( hipFree( dpBitMasked ) );
delete[] pData;
delete[] pIsMasked;
delete[] pBitMasked;
}
} // namespace algorithms
} // namespace imresh
| e7b4b5d1f8e494a0a8bbb248a1ae78eae60e4f85.cu | /*
* The MIT License (MIT)
*
* Copyright (c) 2015-2016 Maximilian Knespel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "testVectorReduce.hpp"
#include <iostream>
#include <iomanip>
#include <cassert>
#include <cstdlib> // srand, rand
#include <cstdint> // uint32_t, uint64_t
#include <chrono>
#include <limits>
#include <vector>
#include <cmath>
#include <cfloat> // FLT_MAX
#include <bitset>
#include <cuda_runtime.h>
#include <cufft.h> // cufftComplex
#ifdef USE_FFTW
# include <fftw3.h>
# include "libs/hybridInputOutput.hpp"
#endif
#include "algorithms/vectorReduce.hpp"
#include "algorithms/cuda/cudaVectorReduce.hpp"
#include "benchmark/imresh/algorithms/cuda/cudaVectorReduce.hpp"
#include "libs/cudacommon.hpp"
#include "benchmarkHelper.hpp"
namespace imresh
{
namespace algorithms
{
unsigned int constexpr nRepetitions = 20;
template<class T_PREC>
bool compareFloat( const char * file, int line, T_PREC a, T_PREC b, T_PREC marginFactor = 1.0 )
{
auto const max = std::max( std::abs(a), std::abs(b) );
if ( max == 0 )
return true; // both are 0 and therefore equal
auto const relErr = fabs( a - b ) / max;
auto const maxRelErr = marginFactor * std::numeric_limits<T_PREC>::epsilon();
if ( not ( relErr <= maxRelErr ) )
printf( "[%s:%i] relErr: %f > %f :maxRelErr!\n", file, line, relErr, maxRelErr );
return relErr <= maxRelErr;
}
void testVectorReduce( void )
{
using namespace std::chrono;
using namespace benchmark::imresh::algorithms::cuda;
using namespace imresh::algorithms::cuda;
using namespace imresh::algorithms;
using namespace imresh::libs;
const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel
auto pData = new float[nMaxElements];
srand(350471643);
for ( unsigned i = 0; i < nMaxElements; ++i )
pData[i] = ( (float) rand() / RAND_MAX ) - 0.5f;
float * dpData;
mallocCudaArray( &dpData, nMaxElements );
CUDA_ERROR( cudaMemcpy( dpData, pData, nMaxElements*sizeof(dpData[0]), cudaMemcpyHostToDevice ) );
/* Test for array of length 1 */
assert( vectorMin( pData, 1 ) == pData[0] );
assert( vectorMax( pData, 1 ) == pData[0] );
assert( vectorSum( pData, 1 ) == pData[0] );
assert( cudaVectorMin( dpData, 1 ) == pData[0] );
assert( cudaVectorMax( dpData, 1 ) == pData[0] );
assert( cudaVectorSum( dpData, 1 ) == pData[0] );
/* do some checks with longer arrays and obvious results */
float obviousMaximum = 7.37519;
float obviousMinimum =-7.37519;
/* in order to filter out page time outs or similarily long random wait
* times, we repeat the measurement nRepetitions times and choose the
* shortest duration measured */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
using clock = std::chrono::high_resolution_clock;
std::cout << "vector length : cudaVectorMax (global atomic) | cudaVectorMax (global atomic) | cudaVectorMax (shared memory) | cudaVectorMax (shared memory+warp reduce) | cudaVectorMax (__shfl_down) | vectorMax | cudaVectorMin (__shfl_down) | vectorMin\n";
using namespace imresh::tests;
for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) )
{
std::cout << std::setw(8) << nElements << " : ";
float milliseconds, minTime;
decltype( clock::now() ) clock0, clock1;
int iObviousValuePos = rand() % nElements;
// std::cout << "iObviousValuePos = " << iObviousValuePos << "\n";
// std::cout << "nElements = " << nElements << "\n";
/* Maximum */
pData[iObviousValuePos] = obviousMaximum;
CUDA_ERROR( cudaMemcpy( dpData, pData, nElements*sizeof(dpData[0]), cudaMemcpyHostToDevice ) );
#define TIME_GPU( FUNC, OBVIOUS_VALUE ) \
{ \
minTime = FLT_MAX; \
for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \
++iRepetition ) \
{ \
cudaEventRecord( start ); \
auto cudaReduced = FUNC( dpData, nElements ); \
cudaEventRecord( stop ); \
cudaEventSynchronize( stop ); \
cudaEventElapsedTime( &milliseconds, start, stop ); \
minTime = fmin( minTime, milliseconds ); \
assert( cudaReduced == OBVIOUS_VALUE ); \
} \
std::cout << std::setw(8) << minTime << " |" << std::flush; \
}
TIME_GPU( cudaVectorMaxGlobalAtomic2 , obviousMaximum )
TIME_GPU( cudaVectorMaxGlobalAtomic , obviousMaximum )
TIME_GPU( cudaVectorMaxSharedMemory , obviousMaximum )
TIME_GPU( cudaVectorMaxSharedMemoryWarps, obviousMaximum )
TIME_GPU( cudaVectorMax , obviousMaximum )
/* time CPU */
#define TIME_CPU( FUNC, OBVIOUS_VALUE ) \
{ \
minTime = FLT_MAX; \
for ( unsigned iRepetition = 0; iRepetition < nRepetitions; \
++iRepetition ) \
{ \
clock0 = clock::now(); \
auto cpuMax = FUNC( pData, nElements ); \
clock1 = clock::now(); \
auto seconds = duration_cast<duration<double>>( \
clock1 - clock0 ); \
minTime = fmin( minTime, seconds.count() * 1000 ); \
assert( cpuMax == OBVIOUS_VALUE ); \
} \
std::cout << std::setw(8) << minTime << " |" << std::flush; \
}
TIME_CPU( vectorMax, obviousMaximum )
/* Minimum */
pData[iObviousValuePos] = obviousMinimum;
CUDA_ERROR( cudaMemcpy( dpData, pData, nElements*sizeof(dpData[0]), cudaMemcpyHostToDevice ) );
TIME_GPU( cudaVectorMin, obviousMinimum )
TIME_CPU( vectorMin, obviousMinimum )
/* set obvious value back to random value */
pData[iObviousValuePos] = (float) rand() / RAND_MAX;
std::cout << "\n";
#undef TIME_GPU
#undef TIME_CPU
}
//for ( unsigned nElements = 2; nElements
CUDA_ERROR( cudaFree( dpData ) );
delete[] pData;
}
template<class T_MASK, class T_PACKED>
__attribute__(( optimize("unroll-loops") ))
void unpackBitMask
(
T_MASK * const __restrict__ rMask,
T_PACKED const * const __restrict__ rPackedBits,
unsigned int const nElements
)
{
auto const nElem = rMask + nElements;
auto constexpr nBits = sizeof( T_PACKED ) * 8u;
auto iPacked = rPackedBits;
for ( auto iElem = rMask; iElem < nElem; ++iPacked )
{
auto bitMask = T_PACKED(0x01) << ( nBits-1 );
for ( auto iBit = 0u; iBit < nBits; ++iBit, ++iElem )
{
if ( iElem >= nElem )
break;
assert( bitMask != T_MASK(0) );
assert( iElem < rMask + nElements );
assert( iPacked < rPackedBits + ceilDiv( nElements, nBits ) );
*iElem = T_MASK( (*iPacked & bitMask) != 0 );
bitMask >>= 1;
}
}
}
void testUnpackBitMask( void )
{
uint32_t packed = 0x33333333;
constexpr auto nElements = 8 * sizeof( packed );
bool unpacked[ nElements ];
unpacked[ nElements-2 ] = 1;
unpacked[ nElements-1 ] = 0;
unpackBitMask( unpacked, &packed, nElements-2 );
for ( auto i = 0u; i < (nElements-2)/2; ++i )
{
assert( unpacked[2*i+0] == i % 2 );
assert( unpacked[2*i+1] == i % 2 );
}
assert( unpacked[ nElements-2 ] == 1 );
assert( unpacked[ nElements-1 ] == 0 );
}
void testCalculateHioError( void )
{
using namespace std::chrono;
using namespace benchmark::imresh::algorithms::cuda; // cudaCalculateHioErrorBitPacked
using namespace imresh::algorithms::cuda; // cudaKernelCalculateHioError
using namespace imresh::libs; // calculateHioError, mallocCudaArray
using namespace imresh::tests; // getLogSpacedSamplingPoints
const unsigned nMaxElements = 64*1024*1024; // ~4000x4000 pixel
/* allocate */
cufftComplex * dpData, * pData;
unsigned char * dpIsMaskedChar, * pIsMaskedChar;
float * dpIsMasked , * pIsMasked;
unsigned * dpBitMasked , * pBitMasked;
auto const nBitMaskedElements = ceilDiv( nMaxElements, 8 * sizeof( dpBitMasked[0] ) );
mallocCudaArray( &dpIsMaskedChar, nMaxElements );
mallocCudaArray( &dpData , nMaxElements );
mallocCudaArray( &dpIsMasked , nMaxElements );
mallocCudaArray( &dpBitMasked , nBitMaskedElements );
pData = new cufftComplex [ nMaxElements ];
pIsMaskedChar = new unsigned char[ nMaxElements ];
pIsMasked = new float [ nMaxElements ];
pBitMasked = new unsigned[ nBitMaskedElements ];
/* allocate result buffer for reduced values of calculateHioError
* kernel call */
float nMaskedPixels, * dpnMaskedPixels;
float totalError , * dpTotalError;
mallocCudaArray( &dpnMaskedPixels, 1 );
mallocCudaArray( &dpTotalError , 1 );
/* initialize mask randomly */
assert( sizeof(int) == 4 );
srand(350471643);
for ( auto i = 0u; i < nBitMaskedElements; ++i )
pBitMasked[i] = rand() % UINT_MAX;
unpackBitMask( pIsMasked, pBitMasked, nMaxElements );
for ( auto i = 0u; i < nMaxElements; ++i )
{
pIsMaskedChar[i] = pIsMasked[i];
assert( pIsMaskedChar[i] == 0 or pIsMaskedChar[i] == 1 );
}
std::cout << "[unpacked] ";
for ( int i = 0; i < 32; ++i )
std::cout << pIsMasked[i];
std::cout << "\n";
std::cout << "[ packed] " << std::bitset<32>( pBitMasked[0] ) << "\n";
/* initialize data with Pythagorean triple 3*3 + 4*4 = 5*5 for masked bits */
for ( auto i = 0u; i < nMaxElements; ++i )
{
if ( pIsMasked[i] )
{
pData[i].x = 3.0f;
pData[i].y = 4.0f;
}
else
{
pData[i].x = (float) rand() / RAND_MAX;
pData[i].y = (float) rand() / RAND_MAX;
}
}
/* if calculateHioError works correctly then we simply get
* #masked * 5 as the mean complex norm error */
/* push to GPU */
CUDA_ERROR( cudaMemcpy( dpData , pData , nMaxElements * sizeof( pData [0] ), cudaMemcpyHostToDevice ) );
CUDA_ERROR( cudaMemcpy( dpIsMasked , pIsMasked , nMaxElements * sizeof( pIsMasked[0] ), cudaMemcpyHostToDevice ) );
CUDA_ERROR( cudaMemcpy( dpBitMasked, pBitMasked, nBitMaskedElements * sizeof( pBitMasked[0] ), cudaMemcpyHostToDevice ) );
CUDA_ERROR( cudaMemcpy( dpIsMaskedChar, pIsMaskedChar, nMaxElements * sizeof( pIsMaskedChar[0] ), cudaMemcpyHostToDevice ) );
std::cout << "test with randomly masked pythagorean triples";
/* because the number of elements we include only increases the number
* of found masked elements should also only increase. */
float nLastMaskedPixels = 0;
for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) )
{
std::cout << "." << std::flush;
CUDA_ERROR( cudaMemset( dpnMaskedPixels, 0, sizeof(float) ) );
CUDA_ERROR( cudaMemset( dpTotalError , 0, sizeof(float) ) );
cudaKernelCalculateHioError<<<3,256>>>
( dpData, dpIsMasked, nElements, false /* don't invert mask */,
dpTotalError, dpnMaskedPixels );
CUDA_ERROR( cudaMemcpy( &nMaskedPixels, dpnMaskedPixels,
sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_ERROR( cudaMemcpy( &totalError, dpTotalError,
sizeof(float), cudaMemcpyDeviceToHost) );
/* Calculation done, now check if everything is correct */
if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2
{
assert( nLastMaskedPixels <= nMaskedPixels );
assert( (unsigned) totalError % 5 == 0 );
assert( nMaskedPixels * 5 == totalError );
}
nLastMaskedPixels = nMaskedPixels;
/* check char version */
CUDA_ERROR( cudaMemset( dpnMaskedPixels, 0, sizeof(float) ) );
CUDA_ERROR( cudaMemset( dpTotalError , 0, sizeof(float) ) );
cudaKernelCalculateHioError<<<3,256>>>
( dpData, dpIsMaskedChar, nElements, false /* don't invert mask */,
dpTotalError, dpnMaskedPixels );
CUDA_ERROR( cudaMemcpy( &nMaskedPixels, dpnMaskedPixels,
sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_ERROR( cudaMemcpy( &totalError, dpTotalError,
sizeof(float), cudaMemcpyDeviceToHost) );
/* Calculation done, now check if everything is correct */
if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2
{
assert( nLastMaskedPixels == nMaskedPixels );
assert( (unsigned) totalError % 5 == 0 );
assert( nMaskedPixels * 5 == totalError );
}
/* check packed bit version */
CUDA_ERROR( cudaMemset( dpnMaskedPixels, 0, sizeof(float) ) );
CUDA_ERROR( cudaMemset( dpTotalError , 0, sizeof(float) ) );
cudaKernelCalculateHioErrorBitPacked<<<1,32>>>
( dpData, dpBitMasked, nElements, dpTotalError, dpnMaskedPixels );
CUDA_ERROR( cudaMemcpy( &nMaskedPixels, dpnMaskedPixels,
sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_ERROR( cudaMemcpy( &totalError, dpTotalError,
sizeof(float), cudaMemcpyDeviceToHost) );
/* Calculation done, now check if everything is correct */
if ( totalError < 16777216 ) // float vlaues higher round to multiple of 2
{
if ( not ( nLastMaskedPixels == nMaskedPixels ) )
{
printf( "nLastMaskedPixels: %f, nMaskedPixels: %f, totalError: %f\n", nLastMaskedPixels, nMaskedPixels, totalError );
assert( nLastMaskedPixels == nMaskedPixels );
}
if ( not ( (unsigned) totalError % 5 == 0 ) )
{
printf( "totalError: %f, nMaskedPixels: %f\n", totalError, nMaskedPixels );
assert( (unsigned) totalError % 5 == 0 );
}
assert( nMaskedPixels * 5 == totalError );
}
else
{
/* no use continuing this loop if we can't assert anything */
break;
}
#ifdef USE_FFTW
static_assert( sizeof( cufftComplex ) == sizeof( fftwf_complex ), "" );
/* now compare with CPU version which should give the exact same
* result, as there should be no floating point rounding errors
* for relatively short array ( < 1e6 ? ) */
float nMaskedPixelsCpu, totalErrorCpu;
calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements, /* is inverted: */ false, &totalErrorCpu, &nMaskedPixelsCpu );
/* when rounding errors occur the order becomes important */
if ( totalError < 16777216 )
{
assert( compareFloat( __FILE__, __LINE__, totalError, totalErrorCpu, sqrtf(nElements) ) );
assert( nMaskedPixelsCpu == nMaskedPixels );
}
#endif
}
std::cout << "OK\n";
/* benchmark with random numbers */
for ( auto i = 0u; i < nBitMaskedElements; ++i )
{
pData[i].x = (float) rand() / RAND_MAX;
pData[i].y = (float) rand() / RAND_MAX;
}
CUDA_ERROR( cudaMemcpy( dpData, pData, nMaxElements * sizeof( pData[0] ), cudaMemcpyHostToDevice ) );
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
using clock = std::chrono::high_resolution_clock;
std::cout << "time in milliseconds:\n";
std::cout << "vector length : cudaCalcHioError(uint32_t) | cudaCalcHioError(char) | cudaCalcHioError(packed) | calcHioError (CPU) |\n";
for ( auto nElements : getLogSpacedSamplingPoints( 2, nMaxElements, 50 ) )
{
std::cout << std::setw(8) << nElements << " : ";
float milliseconds, minTime;
decltype( clock::now() ) clock0, clock1;
float error;
#define TIME_GPU( FUNC, MASK ) \
minTime = FLT_MAX; \
for ( auto iRepetition = 0u; iRepetition < nRepetitions; \
++iRepetition ) \
{ \
cudaEventRecord( start ); \
error = FUNC( dpData, MASK, nElements ); \
cudaEventRecord( stop ); \
cudaEventSynchronize( stop ); \
cudaEventElapsedTime( &milliseconds, start, stop ); \
minTime = fmin( minTime, milliseconds ); \
assert( error <= nElements ); \
} \
std::cout << std::setw(8) << minTime << " |" << std::flush;
TIME_GPU( cudaCalculateHioError, dpIsMasked )
auto unpackedError = error;
TIME_GPU( cudaCalculateHioError, dpIsMaskedChar ) // sets error
compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) );
TIME_GPU( cudaCalculateHioErrorBitPacked, dpBitMasked ) // sets error
compareFloat( __FILE__, __LINE__, unpackedError, error, sqrtf(nElements) );
#ifdef USE_FFTW
/* time CPU */
minTime = FLT_MAX;
for ( auto iRepetition = 0u; iRepetition < nRepetitions;
++iRepetition )
{
clock0 = clock::now();
auto error = calculateHioError( (fftwf_complex*) pData, pIsMasked, nElements );
clock1 = clock::now();
auto seconds = duration_cast<duration<double>>( clock1 - clock0 );
minTime = fmin( minTime, seconds.count() * 1000 );
assert( error <= nElements );
}
#endif
std::cout << std::setw(8) << minTime << "\n" << std::flush;
}
/* free */
CUDA_ERROR( cudaFree( dpnMaskedPixels ) );
CUDA_ERROR( cudaFree( dpTotalError ) );
CUDA_ERROR( cudaFree( dpData ) );
CUDA_ERROR( cudaFree( dpIsMasked ) );
CUDA_ERROR( cudaFree( dpBitMasked ) );
delete[] pData;
delete[] pIsMasked;
delete[] pBitMasked;
}
} // namespace algorithms
} // namespace imresh
|
cd43364a70ab022f50572fa57a7a93a13b2226a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b and c, copies the inputs to the GPU,
// launches addKernel with one thread per element, and copies the sum back.
// Returns the first HIP error encountered (hipSuccess on success).  The
// device buffers are always released before returning (hipFree(0) is a no-op).
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    hipError_t cudaStatus;
    // Single-pass setup: break out of the do/while on the first failure so
    // the shared cleanup at the bottom always runs.
    do {
        // Choose which GPU to run on, change this on a multi-GPU system.
        cudaStatus = hipSetDevice(0);
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
            break;
        }
        // Allocate GPU buffers for three vectors (two input, one output).
        cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMalloc failed!");
            break;
        }
        cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMalloc failed!");
            break;
        }
        cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMalloc failed!");
            break;
        }
        // Copy input vectors from host memory to GPU buffers.
        cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed!");
            break;
        }
        cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed!");
            break;
        }
        // Launch a kernel on the GPU with one thread for each element.
        hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
        // A launch-configuration error only surfaces via hipGetLastError.
        cudaStatus = hipGetLastError();
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
            break;
        }
        // hipDeviceSynchronize waits for the kernel to finish, and returns
        // any errors encountered during the launch.
        cudaStatus = hipDeviceSynchronize();
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            break;
        }
        // Copy output vector from GPU buffer to host memory.
        cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed!");
            break;
        }
    } while (0);
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
    return cudaStatus;
}
| cd43364a70ab022f50572fa57a7a93a13b2226a9.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].
// Expected launch: one block, one thread per element (<<<1, size>>>), so
// threadIdx.x indexes the element directly — every thread is in range.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers, copies a and b to the GPU, launches addKernel
// with a single block of `size` threads, and copies the result into c.
// Returns the first cudaError_t encountered; all device buffers are freed
// through the shared Error label (cudaFree(0) is a harmless no-op).
// NOTE(review): the <<<1, size>>> launch puts all threads in one block, so
// `size` is limited by the device's max threads per block — confirm callers
// never exceed it.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
df62ae2551b4799bbf4b096032e9c5e2bad08afc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ************************************************************************
// * Cuda based 2D elastic wave forward modeling and adjoint inversion *
// * **********************************************************************
// * Author: Congyue Cui *
// * https://github.com/congyue/cufdm_inv *
// * *
// * FDM calculation modified from *
// * https://github.com/Phlos/fd2d-adjoint *
// * by *
// * Nienke Blom *
// * Christian Boehm *
// * Andreas Fichtner *
// * *
// * JSON input file parsed by ArduinoJson *
// * https://github.com/bblanchon/ArduinoJson *
// * *
// ************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include "ArduinoJson.h"
// devij: shared 2D index derivation used by the field kernels below.
//   i = x grid index (one block per column, blockIdx.x),
//   j = z grid index (threads split across blockIdx.y chunks of blockDim.x).
#define devij int i = blockIdx.x, j = threadIdx.x + blockIdx.y * blockDim.x
const float pi = 3.1415927;   // host-side pi
const int nbt = 1;            // number of blocks along the y (z-chunk) axis
__constant__ float d_pi = 3.1415927;  // device-side pi (constant memory)
hipblasHandle_t cublas_handle;        // created elsewhere; used by mat:: helpers
hipsolverDnHandle_t solver_handle;    // created elsewhere; used by solveQR
// Global simulation state.  Pointers are device-side arrays (allocated with
// mat::create) unless explicitly marked "// host".
namespace dat{
    // --- grid geometry and time stepping ---
    int nx;
    int nz;
    int nt;
    float dx;
    float dz;
    float dt;
    float Lx;
    float Lz;
    dim3 nxb;  // kernel launch: grid dimensions
    dim3 nzt;  // kernel launch: block dimensions
    // --- configuration read from externaltools/config ---
    int sfe;   // store-forward-every: snapshot interval in time steps
    int nsfe;  // number of stored snapshots
    int order;
    int model_type;
    int wave_propagation_sh;   // 1 if SH waves are simulated
    int wave_propagation_psv;  // 1 if P-SV waves are simulated
    int simulation_mode;       // 0 = forward, 1 = adjoint (see runWaveFieldPropagation)
    int use_given_model;
    int use_given_stf;
    float source_amplitude;
    // --- absorbing boundary configuration ---
    int absorb_left;
    int absorb_right;
    int absorb_top;
    int absorb_bottom;
    float absorb_width;
    // --- sources, receivers and source time functions ---
    int isrc;  // active source index (< 0 means all sources)
    int nsrc;
    int nrec;
    int obs_type;
    float *tw;  // taper weights over time samples
    int *stf_type; // host
    float *stf_PSV_x; // host
    float *stf_PSV_z; // host
    float *tauw_0; // host
    float *tauw; // host
    float *tee_0; // host
    float *f_min; // host
    float *f_max; // host
    float *src_x;
    float *src_z;
    float *rec_x;
    float *rec_z;
    int *src_x_id;
    int *src_z_id;
    int *rec_x_id;
    int *rec_z_id;
    float **stf_x;
    float **stf_y;
    float **stf_z;
    float **adstf_x;
    float **adstf_y;
    float **adstf_z;
    // --- material model and boundary taper ---
    float **lambda;
    float **mu;
    float **rho;
    float **absbound;
    // --- dynamic wavefields (displacement, velocity, stress) ---
    float **ux;
    float **uy;
    float **uz;
    float **vx;
    float **vy;
    float **vz;
    float **sxx;
    float **sxy;
    float **sxz;
    float **szy;
    float **szz;
    // --- stress divergence and velocity gradients ---
    float **dsx;
    float **dsy;
    float **dsz;
    float **dvxdx;
    float **dvxdz;
    float **dvydx;
    float **dvydz;
    float **dvzdx;
    float **dvzdz;
    // --- forward-wavefield gradients used during adjoint interaction ---
    float **dvxdx_fw;
    float **dvxdz_fw;
    float **dvydx_fw;
    float **dvydz_fw;
    float **dvzdx_fw;
    float **dvzdz_fw;
    // --- sensitivity kernels ---
    float **K_lambda;
    float **K_mu;
    float **K_rho;
    // --- recorded seismograms and observations ---
    float **v_rec_x;
    float **v_rec_y;
    float **v_rec_z;
    float ***u_obs_x;
    float ***u_obs_y;
    float ***u_obs_z;
    // --- stored forward snapshots (kept on the host) ---
    float ***ux_forward; // host
    float ***uy_forward; // host
    float ***uz_forward; // host
    float ***vx_forward; // host
    float ***vy_forward; // host
    float ***vz_forward; // host
    // --- inversion controls and reference values ---
    int optimization_method;
    int sigma;  // Gaussian smoothing width (grid points)
    float **gsum;
    float **gtemp;
    float misfit_ref;
    float lambda_ref;
    float mu_ref;
    float rho_ref;
    float K_lambda_ref;
    float K_rho_ref;
    float K_mu_ref;
    float **lambda_in;
    float **mu_in;
    float **rho_in;
}
// Small device/host array helper library.  Unless a name contains "Host",
// arrays live in device memory.  A 2D device array is a device array of row
// pointers into one contiguous buffer (see create(m, n)); getDataPointer
// recovers that buffer for flat BLAS-style operations.
namespace mat{
    // --- internal device kernels (one thread / element) ---
    __global__ void _setValue(float *mat, const float init){
        int i = blockIdx.x;
        mat[i] = init;
    }
    __global__ void _setValue(double *mat, const double init){
        int i = blockIdx.x;
        mat[i] = init;
    }
    __global__ void _setValue(float **mat, const float init){
        devij;
        mat[i][j] = init;
    }
    __global__ void _setValue(float ***mat, const float init, const int p){
        devij;
        mat[p][i][j] = init;
    }
    // Point row i of a 2D device array at its slice of the flat buffer.
    __global__ void _setPointerValue(float **mat, float *data, const int n){
        int i = blockIdx.x;
        mat[i] = data + n * i;
    }
    __global__ void _setPointerValue(float ***mat, float **data, const int i){
        mat[i] = data;
    }
    __global__ void _setIndexValue(float *a, float *b, int index){
        a[0] = b[index];
    }
    __global__ void _copy(float **mat, float **init){
        devij;
        mat[i][j] = init[i][j];
    }
    __global__ void _copy(float **mat, float **init, float k){
        devij;
        mat[i][j] = init[i][j] * k;
    }
    __global__ void _calc(float **c, float ka, float **a, float kb, float **b){
        devij;
        c[i][j] = ka * a[i][j] + kb * b[i][j];
    }
    // --- fill device arrays with a constant; return the array for chaining ---
    float *init(float *mat, const int m, const float init){
        hipLaunchKernelGGL(( mat::_setValue), dim3(m), dim3(1), 0, 0, mat, init);
        return mat;
    }
    double *init(double *mat, const int m, const double init){
        hipLaunchKernelGGL(( mat::_setValue), dim3(m), dim3(1), 0, 0, mat, init);
        return mat;
    }
    float **init(float **mat, const int m, const int n, const float init){
        dim3 dimBlock(m, nbt);
        hipLaunchKernelGGL(( mat::_setValue), dim3(dimBlock), dim3(n / nbt), 0, 0, mat, init);
        return mat;
    }
    float ***init(float ***mat, const int p, const int m, const int n, const float init){
        dim3 dimBlock(m, nbt);
        for(int i = 0; i < p; i++){
            hipLaunchKernelGGL(( mat::_setValue), dim3(dimBlock), dim3(n / nbt), 0, 0, mat, init, i);
        }
        return mat;
    }
    // --- fill host arrays with a constant ---
    float *initHost(float *mat, const int m, const float init){
        for(int i = 0; i < m; i++){
            mat[i] = init;
        }
        return mat;
    }
    float **initHost(float **mat, const int m, const int n, const float init){
        for(int i = 0; i < m; i++){
            for(int j = 0; j < n; j++){
                mat[i][j] = init;
            }
        }
        return mat;
    }
    float ***initHost(float ***mat, const int p, const int m, const int n, float init){
        for(int k = 0; k < p; k++){
            for(int i = 0; i < m; i++){
                for(int j = 0; j < n; j++){
                    mat[k][i][j] = init;
                }
            }
        }
        return mat;
    }
    // --- allocation (device) ---
    float *create(const int m) {
        float *data;
        hipMalloc((void **)&data, m * sizeof(float));
        return data;
    }
    // m x n device array: one contiguous buffer plus a device row-pointer table.
    float **create(const int m, const int n){
        float *data = mat::create(m * n);
        float **mat;
        hipMalloc((void **)&mat, m * sizeof(float *));
        hipLaunchKernelGGL(( mat::_setPointerValue), dim3(m), dim3(1), 0, 0, mat, data, n);
        return mat;
    }
    float ***create(const int p, const int m, const int n){
        float ***mat;
        hipMalloc((void **)&mat, p * sizeof(float **));
        for(int i = 0; i < p; i++){
            hipLaunchKernelGGL(( mat::_setPointerValue), dim3(1),dim3(1), 0, 0, mat, mat::create(m, n), i);
        }
        return mat;
    }
    // --- allocation (host) ---
    float *createHost(const int m) {
        return (float *)malloc(m * sizeof(float));
    }
    float **createHost(const int m, const int n){
        float *data = mat::createHost(m * n);
        float **mat = (float **)malloc(m * sizeof(float *));
        for(int i =0; i < m; i++){
            mat[i] = data + n * i;
        }
        return mat;
    }
    float ***createHost(const int p, const int m, const int n){
        float ***mat = (float ***)malloc(p * sizeof(float **));
        for(int i = 0; i < p; i++){
            mat[i] = mat::createHost(m, n);
        }
        return mat;
    }
    int *createInt(const int m){
        int *a;
        hipMalloc((void**)&a, m * sizeof(int));
        return a;
    }
    int *createIntHost(const int m) {
        return (int *)malloc(m * sizeof(int));
    }
    double *createDouble(const int m){
        double *a;
        hipMalloc((void**)&a, m * sizeof(double));
        return a;
    }
    double *createDoubleHost(const int m) {
        return (double *)malloc(m * sizeof(double));
    }
    // Recover the contiguous data buffer behind a device 2D array
    // (row pointer 0 points at the start of the buffer).
    // Fix: previously leaked a heap-allocated temporary pointer.
    float *getDataPointer(float **mat){
        float *p = 0;
        hipMemcpy(&p, mat, sizeof(float *), hipMemcpyDeviceToHost);
        return p;
    }
    // --- device-to-device copies ---
    void copy(float **mat, float **init, const int m, const int n){
        dim3 dimBlock(m, nbt);
        hipLaunchKernelGGL(( mat::_copy), dim3(dimBlock), dim3(n / nbt), 0, 0, mat, init);
    }
    void copy(float **mat, float **init, float k, const int m, const int n){
        dim3 dimBlock(m, nbt);
        hipLaunchKernelGGL(( mat::_copy), dim3(dimBlock), dim3(n / nbt), 0, 0, mat, init, k);
    }
    // --- host <-> device copies ---
    void copyHostToDevice(float *d_a, const float *a, const int m){
        hipMemcpy(d_a, a , m * sizeof(float), hipMemcpyHostToDevice);
    }
    // Fix: these helpers previously leaked a malloc'd one-element pointer
    // table on every call; fetch the device row pointer into a local instead.
    void copyHostToDevice(float **pd_a, float *pa, const int m, const int n){
        float *hd_a = 0;
        hipMemcpy(&hd_a, pd_a, sizeof(float *), hipMemcpyDeviceToHost);
        hipMemcpy(hd_a, pa, m * n * sizeof(float), hipMemcpyHostToDevice);
    }
    void copyHostToDevice(float **pd_a, float **pa, const int m, const int n){
        float *hd_a = 0;
        hipMemcpy(&hd_a, pd_a, sizeof(float *), hipMemcpyDeviceToHost);
        hipMemcpy(hd_a, *pa, m * n * sizeof(float), hipMemcpyHostToDevice);
    }
    void copyHostToDevice(float ***pd_a, float ***pa, const int p, const int m, const int n){
        float ***phd_a = (float ***)malloc(p * sizeof(float **));
        hipMemcpy(phd_a, pd_a, p * sizeof(float **), hipMemcpyDeviceToHost);
        for(int i = 0; i < p; i++){
            mat::copyHostToDevice(phd_a[i], pa[i], m, n);
        }
        free(phd_a); // fix: temporary pointer table was leaked
    }
    void copyDeviceToHost(float *a, const float *d_a, const int m){
        hipMemcpy(a, d_a , m * sizeof(float), hipMemcpyDeviceToHost);
    }
    void copyDeviceToHost(float *pa, float **pd_a, const int m, const int n){
        float *hd_a = 0;
        hipMemcpy(&hd_a, pd_a, sizeof(float *), hipMemcpyDeviceToHost);
        hipMemcpy(pa, hd_a, m * n * sizeof(float), hipMemcpyDeviceToHost);
    }
    void copyDeviceToHost(float **pa, float **pd_a, const int m, const int n){
        float *hd_a = 0;
        hipMemcpy(&hd_a, pd_a, sizeof(float *), hipMemcpyDeviceToHost);
        hipMemcpy(*pa, hd_a, m * n * sizeof(float), hipMemcpyDeviceToHost);
    }
    void copyDeviceToHost(float ***pa, float ***pd_a, const int p, const int m, const int n){
        float ***phd_a = (float ***)malloc(p * sizeof(float **));
        hipMemcpy(phd_a, pd_a, p * sizeof(float **), hipMemcpyDeviceToHost);
        for(int i = 0; i < p; i++){
            mat::copyDeviceToHost(pa[i], phd_a[i], m, n);
        }
        free(phd_a); // fix: temporary pointer table was leaked
    }
    // c = ka * a + kb * b (element-wise)
    void calc(float **c, float ka, float **a, float kb, float **b, int m, int n){
        dim3 dimBlock(m, nbt);
        hipLaunchKernelGGL(( mat::_calc), dim3(dimBlock), dim3(n / nbt), 0, 0, c, ka, a, kb, b);
    }
    // --- reductions via hipBLAS ---
    float norm(float *a, int n){
        float norm_a = 0;
        hipblasSnrm2(cublas_handle, n, a, 1, &norm_a);
        return norm_a;
    }
    float norm(float **a, int m, int n){
        return mat::norm(mat::getDataPointer(a), m * n);
    }
    // Value of the element with the largest |value| (sign preserved).
    // hipblasIsamax returns a 1-based index.
    // Fix: previously leaked a device scratch float and a host buffer.
    float amax(float *a, int n){
        int index = 0;
        hipblasIsamax(cublas_handle, n, a, 1, &index);
        float value = 0;
        hipMemcpy(&value, a + index - 1, sizeof(float), hipMemcpyDeviceToHost);
        return value;
    }
    float amax(float **a, int m, int n){
        return mat::amax(mat::getDataPointer(a), m * n);
    }
    float dot(float *a, float *b, int n){
        float dot_ab = 0;
        hipblasSdot(cublas_handle, n, a, 1, b, 1, &dot_ab);
        return dot_ab;
    }
    float dot(float **a, float **b, int m, int n){
        return mat::dot(mat::getDataPointer(a), mat::getDataPointer(b), m * n);
    }
    // --- binary I/O in externaltools/ ---
    // Read n floats from externaltools/<fname> into a host buffer.
    void read(float *data, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "rb");
        // bug fix: this previously called fwrite(), writing garbage to a file
        // opened for reading instead of filling `data`; fread matches "rb".
        fread(data, sizeof(float), n, file);
        fclose(file);
    }
    void write(float *data, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "wb");
        fwrite(data, sizeof(float), n, file);
        fclose(file);
    }
    void write(float **data, int m, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "wb");
        for(int i = 0; i < m; i++){
            fwrite(data[i], sizeof(float), n, file);
        }
        fclose(file);
    }
    void write(float ***data, int p, int m, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "wb");
        for(int k = 0; k < p; k++){
            for(int i = 0; i < m; i++){
                fwrite(data[k][i], sizeof(float), n, file);
            }
        }
        fclose(file);
    }
}
// Short aliases into dat:: for the values used on almost every line below.
dim3 &nxb = dat::nxb;
dim3 &nzt = dat::nzt;
int &sh = dat::wave_propagation_sh;
int &psv = dat::wave_propagation_psv;
int &mode = dat::simulation_mode;
int &nx = dat::nx;
int &nz = dat::nz;
int &nt = dat::nt;
int &nsrc = dat::nsrc;
int &nrec = dat::nrec;
float &dx = dat::dx;
float &dz = dat::dz;
float &dt = dat::dt;
// Stress divergence for the SH system: dsy = d(sxy)/dx + d(szy)/dz.
// 4th-order staggered-grid finite differences in the interior; the two-cell
// border in x is zeroed, and the z term is simply skipped near the z edges.
__global__ void divSY(float **dsy, float **sxy, float **szy, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 2 && i < nx - 2){
        dsy[i][j] = 9*(sxy[i][j]-sxy[i-1][j])/(8*dx)-(sxy[i+1][j]-sxy[i-2][j])/(24*dx);
    }
    else{
        dsy[i][j] = 0;
    }
    if(j >= 2 && j < nz - 2){
        dsy[i][j] += 9*(szy[i][j]-szy[i][j-1])/(8*dz)-(szy[i][j+1]-szy[i][j-2])/(24*dz);
    }
}
// Stress divergence for the P-SV system:
//   dsx = d(sxx)/dx + d(sxz)/dz,  dsz = d(sxz)/dx + d(szz)/dz.
// Same 4th-order backward-staggered stencil as divSY, zeroed at the x edges.
__global__ void divSXZ(float **dsx, float **dsz, float **sxx, float **szz, float **sxz, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 2 && i < nx - 2){
        dsx[i][j] = 9*(sxx[i][j]-sxx[i-1][j])/(8*dx)-(sxx[i+1][j]-sxx[i-2][j])/(24*dx);
        dsz[i][j] = 9*(sxz[i][j]-sxz[i-1][j])/(8*dx)-(sxz[i+1][j]-sxz[i-2][j])/(24*dx);
    }
    else{
        dsx[i][j] = 0;
        dsz[i][j] = 0;
    }
    if(j >= 2 && j < nz - 2){
        dsx[i][j] += 9*(sxz[i][j]-sxz[i][j-1])/(8*dz)-(sxz[i][j+1]-sxz[i][j-2])/(24*dz);
        dsz[i][j] += 9*(szz[i][j]-szz[i][j-1])/(8*dz)-(szz[i][j+1]-szz[i][j-2])/(24*dz);
    }
}
// Velocity gradients for the SH system: dvydx = d(vy)/dx, dvydz = d(vy)/dz.
// 4th-order forward-staggered stencil; gradients are zeroed at the borders.
__global__ void divVY(float **dvydx, float **dvydz, float **vy, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 1 && i < nx - 2){
        dvydx[i][j] = 9*(vy[i+1][j]-vy[i][j])/(8*dx)-(vy[i+2][j]-vy[i-1][j])/(24*dx);
    }
    else{
        dvydx[i][j] = 0;
    }
    if(j >= 1 && j < nz - 2){
        dvydz[i][j] = 9*(vy[i][j+1]-vy[i][j])/(8*dz)-(vy[i][j+2]-vy[i][j-1])/(24*dz);
    }
    else{
        dvydz[i][j] = 0;
    }
}
// Velocity gradients for the P-SV system: all four partials of (vx, vz).
// Same 4th-order forward-staggered stencil as divVY, zeroed at the borders.
__global__ void divVXZ(float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz, float **vx, float **vz, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 1 && i < nx - 2){
        dvxdx[i][j] = 9*(vx[i+1][j]-vx[i][j])/(8*dx)-(vx[i+2][j]-vx[i-1][j])/(24*dx);
        dvzdx[i][j] = 9*(vz[i+1][j]-vz[i][j])/(8*dx)-(vz[i+2][j]-vz[i-1][j])/(24*dx);
    }
    else{
        dvxdx[i][j] = 0;
        dvzdx[i][j] = 0;
    }
    if(j >= 1 && j < nz - 2){
        dvxdz[i][j] = 9*(vx[i][j+1]-vx[i][j])/(8*dz)-(vx[i][j+2]-vx[i][j-1])/(24*dz);
        dvzdz[i][j] = 9*(vz[i][j+1]-vz[i][j])/(8*dz)-(vz[i][j+2]-vz[i][j-1])/(24*dz);
    }
    else{
        dvxdz[i][j] = 0;
        dvzdz[i][j] = 0;
    }
}
// Inject the source time function sample for time step `it` into the stress
// divergence fields.  One block per source; isrc < 0 means "all sources",
// otherwise only the selected source fires.
__global__ void addSTF(float **dsx, float **dsy, float **dsz, float **stf_x, float **stf_y, float **stf_z,
    int *src_x_id, int *src_z_id, int isrc, int sh, int psv, int it){
    int is = blockIdx.x;
    if(isrc >= 0 && isrc != is) return;
    int xs = src_x_id[is];
    int zs = src_z_id[is];
    if(sh){
        dsy[xs][zs] += stf_y[is][it];
    }
    if(psv){
        dsx[xs][zs] += stf_x[is][it];
        dsz[xs][zs] += stf_z[is][it];
    }
}
// Record the velocity at every receiver for time step `it` (one block per
// receiver).  SH stores vy; P-SV stores vx and vz.
__global__ void saveV(float **v_rec_x, float **v_rec_y, float **v_rec_z, float **vx, float **vy, float **vz,
    int *rec_x_id, int *rec_z_id, int sh, int psv, int it){
    int r = blockIdx.x;
    int ix = rec_x_id[r];
    int iz = rec_z_id[r];
    if(sh){
        v_rec_y[r][it] = vy[ix][iz];
    }
    if(psv){
        v_rec_x[r][it] = vx[ix][iz];
        v_rec_z[r][it] = vz[ix][iz];
    }
}
// Overload: record receiver velocities into the per-source 3D arrays
// (slot `isrc`).  One block per receiver; SH stores vy, P-SV stores vx/vz.
__global__ void saveV(float ***v_rec_x, float ***v_rec_y, float ***v_rec_z, float **vx, float **vy, float **vz,
    int *rec_x_id, int *rec_z_id, int isrc, int sh, int psv, int it){
    int r = blockIdx.x;
    int ix = rec_x_id[r];
    int iz = rec_z_id[r];
    if(sh){
        v_rec_y[isrc][r][it] = vy[ix][iz];
    }
    if(psv){
        v_rec_x[isrc][r][it] = vx[ix][iz];
        v_rec_z[isrc][r][it] = vz[ix][iz];
    }
}
// Explicit Euler velocity update v += dt * ds / rho, tapered by the
// absorbing-boundary mask.
__global__ void updateV(float **v, float **ds, float **rho, float **absbound, float dt){
    devij;
    float vnew = v[i][j] + dt * ds[i][j] / rho[i][j];
    v[i][j] = absbound[i][j] * vnew;
}
// SH stress update from the vy gradients (Hooke's law, shear only).
__global__ void updateSY(float **sxy, float **szy, float **dvydx, float **dvydz, float **mu, float dt){
    devij;
    float mudt = dt * mu[i][j];
    sxy[i][j] += mudt * dvydx[i][j];
    szy[i][j] += mudt * dvydz[i][j];
}
// P-SV stress update from the velocity gradients (isotropic Hooke's law
// with Lame parameters lambda and mu).
__global__ void updateSXZ(float **sxx, float **szz, float **sxz, float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz,
    float **lambda, float **mu, float dt){
    devij;
    float lam = lambda[i][j];
    float lam2mu = lam + 2 * mu[i][j];
    sxx[i][j] += dt * (lam2mu * dvxdx[i][j] + lam * dvzdz[i][j]);
    szz[i][j] += dt * (lam2mu * dvzdz[i][j] + lam * dvxdx[i][j]);
    sxz[i][j] += dt * (mu[i][j] * (dvxdz[i][j] + dvzdx[i][j]));
}
// Integrate displacement from velocity: u += v * dt.
__global__ void updateU(float **u, float **v, float dt){
    devij;
    u[i][j] = u[i][j] + v[i][j] * dt;
}
// Accumulate the SH density kernel: correlate adjoint vy with forward vy,
// weighted by tsfe (dt * snapshot interval).
__global__ void interactionRhoY(float **K_rho, float **vy, float **vy_fw, float tsfe){
    devij;
    K_rho[i][j] -= vy_fw[i][j] * vy[i][j] * tsfe;
}
// Accumulate the P-SV density kernel: correlate adjoint (vx, vz) with the
// stored forward (vx, vz).
__global__ void interactionRhoXZ(float **K_rho, float **vx, float **vx_fw, float **vz, float **vz_fw, float tsfe){
    devij;
    K_rho[i][j] -= (vx_fw[i][j] * vx[i][j] + vz_fw[i][j] * vz[i][j]) * tsfe;
}
// Accumulate the SH shear-modulus kernel from adjoint/forward vy gradients.
__global__ void interactionMuY(float **K_mu, float **dvydx, float **dvydx_fw, float **dvydz, float **dvydz_fw, float tsfe){
    devij;
    K_mu[i][j] -= (dvydx[i][j] * dvydx_fw[i][j] + dvydz[i][j] * dvydz_fw[i][j]) * tsfe;
}
// Accumulate the P-SV shear-modulus kernel: normal-strain products plus the
// cross term built from the symmetrised shear strains.
__global__ void interactionMuXZ(float **K_mu, float **dvxdx, float **dvxdx_fw, float **dvxdz, float **dvxdz_fw,
    float **dvzdx, float **dvzdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
    devij;
    K_mu[i][j] -= (2 * dvxdx[i][j] * dvxdx_fw[i][j] + 2 * dvzdz[i][j] * dvzdz_fw[i][j] +
        (dvxdz[i][j] + dvzdx[i][j]) * (dvzdx_fw[i][j] + dvxdz_fw[i][j])) * tsfe;
}
// Accumulate the P-SV lambda kernel from the adjoint and forward dilatations
// (trace of the strain rate).
__global__ void interactionLambdaXZ(float **K_lambda, float **dvxdx, float **dvxdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
    devij;
    K_lambda[i][j] -= ((dvxdx[i][j] + dvzdz[i][j]) * (dvxdx_fw[i][j] + dvzdz_fw[i][j])) * tsfe;
}
// Map physical coordinates onto nearest grid indices (one block per point):
// index = round(coord / L * (n - 1)).  Note n is passed as float.
__global__ void computeIndices(int *coord_n_id, float *coord_n, float Ln, float n){
    int i = blockIdx.x;
    float gridpos = coord_n[i] / Ln * (n - 1);
    coord_n_id[i] = (int)(gridpos + 0.5);
}
// Build the multiplicative damping mask for the absorbing boundaries:
// 1 in the interior, decaying as a Gaussian of the distance into each
// enabled boundary strip of the given width.  Factors from multiple strips
// multiply in the corners.
// NOTE(review): uses double-precision exp/pow in device code — presumably
// acceptable here since this runs once at setup; confirm if it matters.
__global__ void initialiseAbsorbingBoundaries(float **absbound, float width,
    int absorb_left, int absorb_right, int absorb_bottom, int absorb_top,
    float Lx, float Lz, float dx, float dz){
    devij;
    absbound[i][j] = 1;
    float X = i * dx;
    float Z = j * dz;
    if(absorb_left){
        if(X < width){
            absbound[i][j] *= exp(-pow((X - width) / (2 * width), 2));
        }
    }
    if(absorb_right){
        if(X > Lx - width){
            absbound[i][j] *= exp(-pow((X - (Lx - width)) / (2 * width), 2));
        }
    }
    if(absorb_bottom){
        if(Z < width){
            absbound[i][j] *= exp(-pow((Z - width) / (2 * width), 2));
        }
    }
    if(absorb_top){
        if(Z > Lz - width){
            absbound[i][j] *= exp(-pow((Z - (Lz - width)) / (2 * width), 2));
        }
    }
}
// Build the adjoint source: the taper-windowed waveform residual, written
// time-reversed.  Grid: one block per time step, one thread per receiver.
__global__ void prepareAdjointSTF(float **adstf, float **u_syn, float ***u_obs, float *tw, int nt, int isrc){
    int it = blockIdx.x;
    int irec = threadIdx.x;
    float residual = u_syn[irec][it] - u_obs[isrc][irec][it];
    adstf[irec][nt - it - 1] = residual * tw[it] * 2;
}
// Rescale a kernel field by a reference model value over a reference misfit.
__global__ void normKernel(float **model, float model_ref, float misfit_ref){
    devij;
    model[i][j] = model[i][j] * (model_ref / misfit_ref);
}
// 1D normal pdf with standard deviation `sigma`, evaluated at integer
// offset x (used as the smoothing weight in the Gaussian filter kernels).
__device__ float gaussian(int x, int sigma){
    float fx = (float)x;
    float fs = (float)sigma;
    float norm = 1 / (sqrtf(2 * d_pi) * fs);
    return norm * expf(-fx * fx / (2 * fs * fs));
}
// Precompute the normalisation field for the separable Gaussian smoother:
// model[i][j] = (sum of x-weights) * (sum of z-weights), later used as the
// denominator in filterKernelZ so truncated edges are renormalised.
__global__ void initialiseGaussian(float **model, int nx, int nz, int sigma){
    devij;
    float sumx = 0;
    for(int n = 0; n < nx; n++){
        sumx += gaussian(i - n, sigma);
    }
    float sumz = 0;
    for(int n = 0; n < nz; n++){
        sumz += gaussian(j - n, sigma);
    }
    model[i][j] = sumx * sumz;
}
// First pass of the separable Gaussian smoother: blur `model` along x
// into the scratch array gtemp.
__global__ void filterKernelX(float **model, float **gtemp, int nx, int sigma){
    devij;
    float sumx = 0;
    for(int n = 0; n < nx; n++){
        sumx += gaussian(i - n, sigma) * model[n][j];
    }
    gtemp[i][j] = sumx;
}
// Second pass of the separable Gaussian smoother: blur gtemp along z and
// renormalise by the precomputed weight sums in gsum (see initialiseGaussian).
__global__ void filterKernelZ(float **model, float **gtemp, float **gsum, int nz, int sigma){
    devij;
    float sumz = 0;
    for(int n = 0; n < nz; n++){
        sumz += gaussian(j - n, sigma) * gtemp[i][n];
    }
    model[i][j] = sumz / gsum[i][j];
}
// Multiplicative model update: first divide out the previous trial step
// (1 - step_prev * kernel), then apply the new one (1 - step * kernel).
// NOTE(review): presumably used during the step-length search so successive
// trial steps start from the same base model — confirm against the caller.
__global__ void updateModel(float **model, float **kernel, float step, float step_prev){
    devij;
    model[i][j] /= (1 - step_prev * kernel[i][j]);
    model[i][j] *= (1 - step * kernel[i][j]);
}
// Cosine taper over the time axis (one block per time sample): weights rise
// from 0 to 1 over the first tenth of the trace, stay 1 in the middle, and
// fall back to 0 over the last tenth.
__global__ void getTaperWeights(float *tw, float dt, int nt){
    int it = blockIdx.x;
    float t_end = (nt - 1) * dt;
    float taper_width = t_end / 10;
    float t_min = taper_width;
    float t_max = t_end - taper_width;
    float t = it * dt;
    if(t <= t_min){
        tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_min - t) / (taper_width));
    }
    else if(t >= t_max){
        tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_max - t) / (taper_width));
    }
    else{
        tw[it] = 1;
    }
}
// Accumulate the L2 waveform misfit for one (source, receiver) pair:
// misfit[it] += (taper-weighted residual)^2 * dt, one block per time sample.
// NOTE(review): += accumulates across calls, so `misfit` must be zeroed by
// the caller before the first pair — confirm at the call site.
__global__ void calculateMisfit(float *misfit, float **u_syn, float ***u_obs, float *tw, float dt, int isrc, int irec){
    int it = blockIdx.x;
    float wavedif = (u_syn[irec][it] - u_obs[isrc][irec][it]) * tw[it];
    misfit[it] += wavedif * wavedif * dt;
}
// Copy the leading N x N submatrix out of two column-major M-row matrices
// (used by solveQR to extract R and the reduced right-hand side).
// Launched as <<<N, N>>>: one block per column index, one thread per row.
__global__ void reduceSystem(const double * __restrict d_in1, double * __restrict d_out1, const double * __restrict d_in2, double * __restrict d_out2, const int M, const int N) {
    const int i = blockIdx.x;
    const int j = threadIdx.x;
    if ((i < N) && (j < N)){
        d_out1[j * N + i] = d_in1[j * M + i];
        d_out2[j * N + i] = d_in2[j * M + i];
    }
}
// Solve the least-squares system A x = B (A is Nrows x Ncols, column-major,
// on the host) via QR factorisation with hipSOLVER:
//   1) geqrf factors A in place,
//   2) ormqr applies Q^T to the right-hand side,
//   3) the leading Ncols x Ncols triangle R is extracted and solved with trsm.
// The first Ncols entries of the solution are copied into XC.
static void solveQR(double *h_A, double *h_B, double *XC, const int Nrows, const int Ncols){
    int work_size = 0;
    int *devInfo = mat::createInt(1);
    double *d_A = mat::createDouble(Nrows * Ncols);
    hipMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), hipMemcpyHostToDevice);
    double *d_TAU = mat::createDouble(min(Nrows, Ncols));
    hipsolverDnDgeqrf_bufferSize(solver_handle, Nrows, Ncols, d_A, Nrows, &work_size);
    double *work = mat::createDouble(work_size);
    hipsolverDnDgeqrf(solver_handle, Nrows, Ncols, d_A, Nrows, d_TAU, work, work_size, devInfo);
    // NOTE(review): d_Q is passed to ormqr without being initialised and its
    // result is never read afterwards — this call looks like dead code;
    // verify before removing.
    double *d_Q = mat::createDouble(Nrows * Nrows);
    hipsolverDnDormqr(solver_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_Q, Nrows, work, work_size, devInfo);
    // d_C holds the right-hand side padded to Nrows x Nrows; apply Q^T.
    double *d_C = mat::createDouble(Nrows * Nrows);
    mat::init(d_C, Nrows * Nrows, 0);
    hipMemcpy(d_C, h_B, Nrows * sizeof(double), hipMemcpyHostToDevice);
    hipsolverDnDormqr(solver_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_T, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_C, Nrows, work, work_size, devInfo);
    // Extract R (upper triangle of factored A) and the reduced RHS, then
    // back-substitute with a triangular solve.
    double *d_R = mat::createDouble(Ncols * Ncols);
    double *d_B = mat::createDouble(Ncols * Ncols);
    hipLaunchKernelGGL(( reduceSystem), dim3(Ncols), dim3(Ncols), 0, 0, d_A, d_R, d_C, d_B, Nrows, Ncols);
    const double alpha = 1.;
    hipblasDtrsm(cublas_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, Ncols, Ncols,
        &alpha, d_R, Ncols, d_B, Ncols);
    hipMemcpy(XC, d_B, Ncols * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    hipFree(d_Q);
    hipFree(d_R);
    hipFree(d_TAU);
    hipFree(devInfo);
    hipFree(work);
}
// Least-squares quadratic fit y ~ p[0]*x^2 + p[1]*x + p[2] through n points
// (x, y), solved via solveQR.  Returns the residual sum of squares.
static double polyfit(double *x, double *y, double *p, int n){
    // column-major design matrix with columns [x^2, x, 1]
    double *A = mat::createDoubleHost(3 * n);
    for(int i = 0; i < n; i++){
        A[i] = x[i] * x[i];
        A[i + n] = x[i];
        A[i + n * 2] = 1;
    }
    solveQR(A, y, p, n, 3);
    double rss = 0;
    for(int i = 0; i < n; i++){
        double ei = p[0] * x[i] * x[i] + p[1] * x[i] + p[2];
        rss += pow(y[i] - ei, 2);
    }
    free(A); // bug fix: the design matrix was previously leaked
    return rss;
}
// Read the JSON run configuration from externaltools/config and populate
// the dat:: globals: grid/time parameters, boundary switches, wave type,
// and the source and receiver tables (coordinates copied to the device).
static void importData(){
    FILE *datfile = fopen("externaltools/config","r");
    // fix: previously a missing config file crashed on fseek(NULL)
    if (datfile == NULL){
        printf("could not open externaltools/config\n");
        return;
    }
    char *buffer = 0;
    long length;
    fseek (datfile, 0, SEEK_END);
    length = ftell (datfile);
    fseek (datfile, 0, SEEK_SET);
    buffer = (char *)malloc (length + 1);
    fread (buffer, 1, length, datfile);
    buffer[length] = '\0';
    fclose(datfile);
    if (buffer){
        DynamicJsonBuffer jsonBuffer;
        JsonObject& root = jsonBuffer.parseObject(buffer);
        if (!root.success()){
            printf("parseObject() failed\n");
        }
        else{
            // scalar configuration values
            dat::nx = root["nx"];
            dat::nz = root["nz"];
            dat::nt = root["nt"];
            dat::dt = root["dt"];
            dat::Lx = root["Lx"];
            dat::Lz = root["Lz"];
            dat::sfe = root["sfe"];
            dat::model_type = root["model_type"];
            dat::use_given_model = root["use_given_model"];
            dat::use_given_stf = root["use_given_stf"];
            dat::source_amplitude = root["source_amplitude"];
            dat::order = root["order"]; // order = 2: later
            dat::obs_type = root["obs_type"];
            dat::absorb_left = root["absorb_left"];
            dat::absorb_right = root["absorb_right"];
            dat::absorb_top = root["absorb_top"];
            dat::absorb_bottom = root["absorb_bottom"];
            dat::absorb_width = root["width"];
            {
                // wave type -> sh/psv flags
                const char* wave_propagation_type = root["wave_propagation_type"].as<char*>();
                if(strcmp(wave_propagation_type,"SH") == 0){
                    dat::wave_propagation_sh = 1;
                    dat::wave_propagation_psv = 0;
                }
                else if(strcmp(wave_propagation_type,"PSV") == 0){
                    dat::wave_propagation_sh = 0;
                    dat::wave_propagation_psv = 1;
                }
                else if(strcmp(wave_propagation_type,"both") == 0){
                    dat::wave_propagation_sh = 1;
                    dat::wave_propagation_psv = 1;
                }
                else{
                    dat::wave_propagation_sh = 0;
                    dat::wave_propagation_psv = 0;
                }
            }
            {
                // source table: src_info may be one object or an array
                int single_src = root["src_info"].is<JsonObject>();
                dat::nsrc = single_src?1:root["src_info"].size();
                float *src_x = mat::createHost(nsrc);
                float *src_z = mat::createHost(nsrc);
                dat::stf_type = mat::createIntHost(nsrc);
                dat::stf_PSV_x = mat::createHost(nsrc);
                dat::stf_PSV_z = mat::createHost(nsrc);
                dat::tauw_0 = mat::createHost(nsrc);
                dat::tauw = mat::createHost(nsrc);
                dat::tee_0 = mat::createHost(nsrc);
                dat::f_min = mat::createHost(nsrc);
                dat::f_max = mat::createHost(nsrc);
                for(int isrc = 0; isrc < nsrc; isrc++){
                    JsonObject& src = single_src?root["src_info"]:((JsonArray&)root["src_info"]).get<JsonObject>(isrc);
                    src_x[isrc] = src["loc_x"];
                    src_z[isrc] = src["loc_z"];
                    dat::stf_PSV_x[isrc] = src["stf_PSV"][0];
                    dat::stf_PSV_z[isrc] = src["stf_PSV"][1];
                    dat::tauw_0[isrc] = src["tauw_0"];
                    dat::tauw[isrc] = src["tauw"];
                    dat::tee_0[isrc] = src["tee_0"];
                    dat::f_min[isrc] = src["f_min"];
                    dat::f_max[isrc] = src["f_max"];
                    const char* stf_type_str = src["stf_type"].as<char*>();
                    if(strcmp(stf_type_str,"delta") == 0){
                        dat::stf_type[isrc] = 0;
                    }
                    else if(strcmp(stf_type_str,"delta_bp") == 0){
                        dat::stf_type[isrc] = 1;
                    }
                    else if(strcmp(stf_type_str,"ricker") == 0){
                        dat::stf_type[isrc] = 2;
                    }
                    else if(strcmp(stf_type_str,"heaviside_bp") == 0){
                        dat::stf_type[isrc] = 3;
                    }
                    else{
                        dat::stf_type[isrc] = -1;
                    }
                }
                // coordinates live on the device; temp host copies freed here
                dat::src_x = mat::create(nsrc);
                dat::src_z = mat::create(nsrc);
                mat::copyHostToDevice(dat::src_x, src_x, nsrc);
                mat::copyHostToDevice(dat::src_z, src_z, nsrc);
                free(src_x);
                free(src_z);
            }
            {
                // receiver table: rec_x may be one float or an array
                int single_rec = root["rec_x"].is<float>();
                dat::nrec = single_rec?1:root["rec_x"].size();
                float *rec_x = mat::createHost(nrec);
                float *rec_z = mat::createHost(nrec);
                for(int irec = 0; irec < nrec; irec++){
                    rec_x[irec] = single_rec?root["rec_x"]:((JsonArray&)root["rec_x"]).get<float>(irec);
                    rec_z[irec] = single_rec?root["rec_z"]:((JsonArray&)root["rec_z"]).get<float>(irec);
                }
                dat::rec_x = mat::create(nrec);
                dat::rec_z = mat::create(nrec);
                mat::copyHostToDevice(dat::rec_x, rec_x, nrec);
                mat::copyHostToDevice(dat::rec_z, rec_z, nrec);
                free(rec_x);
                free(rec_z);
            }
        }
        jsonBuffer.clear();
    }
    free(buffer); // bug fix: the file buffer was previously leaked
}
// Print the current device memory consumption (used / total, in MB).
static void checkMemoryUsage(){
    size_t free_byte;
    size_t total_byte;
    hipMemGetInfo(&free_byte, &total_byte);
    float total_db = (float)total_byte;
    float used_db = total_db - (float)free_byte;
    printf("memory usage: %.1fMB / %.1fMB\n", used_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0);
}
// Build the normalised source time function for source `index` into stf
// (length nt).  Only stf_type 2 (Ricker wavelet) is implemented; other
// types now produce an all-zero trace.
static void makeSourceTimeFunction(float *stf, int index){
    float max = 0;
    float alfa = 2 * dat::tauw_0[index] / dat::tauw[index];
    for(int it = 0; it < nt; it++){
        float t = it * dt;
        switch(dat::stf_type[index]){
            case 2:{
                stf[it] = (-2 * pow(alfa, 3) / pi) * (t - dat::tee_0[index]) * exp(-pow(alfa, 2) * pow(t - dat::tee_0[index], 2));
                break;
            }
            // other stf types (delta, delta_bp, heaviside_bp): later
            default:{
                // bug fix: stf[it] was read below without ever being written
                // for unhandled stf_type values (uninitialised memory)
                stf[it] = 0;
                break;
            }
        }
        if(fabs(stf[it]) > max){
            max = fabs(stf[it]);
        }
    }
    // normalise to unit peak amplitude (skip for an all-zero trace)
    if(max > 0){
        for(int it = 0; it < nt; it++){
            stf[it] /= max;
        }
    }
}
// Build the per-source, per-component source time functions on the host
// (scaled by source_amplitude / cell area, with the P-SV direction vector
// normalised) and copy them to the device arrays dat::stf_{x,y,z}.
// NOTE(review): if stf_PSV is (0, 0) the norm is 0 and the x/z traces
// become NaN — presumably inputs always have a non-zero direction; verify.
static void prepareSTF(){
    float amp = dat::source_amplitude / dx / dz;
    float **stf_x = mat::createHost(nsrc, nt);
    float **stf_y = mat::createHost(nsrc, nt);
    float **stf_z = mat::createHost(nsrc, nt);
    float *stfn = mat::createHost(nt);
    for(int isrc = 0; isrc < nsrc; isrc++){
        makeSourceTimeFunction(stfn, isrc);
        float px = dat::stf_PSV_x[isrc];
        float pz = dat::stf_PSV_z[isrc];
        float norm = sqrt(pow(px,2) + pow(pz,2));
        for(int it = 0; it < nt; it++){
            stf_x[isrc][it] = amp * stfn[it] * px / norm;
            stf_y[isrc][it] = amp * stfn[it];
            stf_z[isrc][it] = amp * stfn[it] * pz / norm;
        }
    }
    mat::copyHostToDevice(dat::stf_x, stf_x, nsrc, nt);
    mat::copyHostToDevice(dat::stf_y, stf_y, nsrc, nt);
    mat::copyHostToDevice(dat::stf_z, stf_z, nsrc, nt);
    // createHost(m, n) uses one flat buffer; free it, then the row table
    free(*stf_x);
    free(*stf_y);
    free(*stf_z);
    free(stf_x);
    free(stf_y);
    free(stf_z);
    free(stfn);
}
// Fill the device material fields (rho, mu, lambda) for the selected
// model_type: 1 and 10 are homogeneous media; 13 is the model-10 background
// with a denser rectangular inclusion at the centre of the grid.
// Unknown model_type values leave the fields untouched.
static void defineMaterialParameters(){
    // other model_type: later
    switch(dat::model_type){
        case 1:{
            mat::init(dat::rho, nx, nz, 3000);
            mat::init(dat::mu, nx, nz, 4.8e10);
            mat::init(dat::lambda, nx, nz, 4.8e10);
            break;
        }
        case 10:{
            mat::init(dat::rho, nx, nz, 2600);
            mat::init(dat::mu, nx, nz, 2.66e10);
            mat::init(dat::lambda, nx, nz, 3.42e10);
            break;
        }
        case 13:{
            mat::init(dat::mu, nx, nz, 2.66e10);
            mat::init(dat::lambda, nx, nz, 3.42e10);
            // NOTE(review): vp and vs are computed but never used here
            float rho = 2600;
            float mu = 2.66e10;
            float lambda = 3.42e10;
            float vp = sqrt((lambda + 2*mu) / rho);
            float vs = sqrt(mu / rho);
            // density anomaly: a centred box one tenth of the grid wide
            int left = (int)((float)nx / 2 - (float)nx / 20 + 0.5);
            int right = (int)((float)nx / 2 + (float)nx / 20 + 0.5);
            int bottom = (int)((float)nz / 2 - (float)nz / 20 + 0.5);
            int top = (int)((float)nz / 2 + (float)nz / 20 + 0.5);
            float **rho2 = mat::createHost(nx, nz);
            mat::initHost(rho2, nx, nz, 2600);
            for(int i = left; i < right; i++){
                for(int j = bottom; j < top; j++){
                    rho2[i][j] = 2800;
                }
            }
            mat::copyHostToDevice(dat::rho, rho2, nx, nz);
            // free the flat data buffer, then the row-pointer table
            free(*rho2);
            free(rho2);
        }
    }
}
// Zero every dynamic wavefield (velocity, displacement, stress) used by the
// active wave types before a propagation run.
static void initialiseDynamicFields(){
    if(sh){
        float **fields_sh[] = {dat::vy, dat::uy, dat::sxy, dat::szy};
        for(int k = 0; k < 4; k++){
            mat::init(fields_sh[k], nx, nz, 0);
        }
    }
    if(psv){
        float **fields_psv[] = {dat::vx, dat::vz, dat::ux, dat::uz, dat::sxx, dat::szz, dat::sxz};
        for(int k = 0; k < 7; k++){
            mat::init(fields_psv[k], nx, nz, 0);
        }
    }
}
// Reset the three sensitivity kernels before accumulation.
static void initialiseKernels(){
    float **kernels[] = {dat::K_lambda, dat::K_mu, dat::K_rho};
    for(int k = 0; k < 3; k++){
        mat::init(kernels[k], nx, nz, 0);
    }
}
// Main time-stepping loop, shared by the forward (mode == 0) and adjoint
// (mode == 1) simulations. Forward mode injects the source time functions,
// records receiver traces and stores wavefield snapshots every dat::sfe
// steps (in reverse time order, so the adjoint pass can read them front to
// back). Adjoint mode injects the adjoint sources at the receivers and
// correlates the adjoint field with the stored forward snapshots to
// accumulate the rho/mu/lambda sensitivity kernels.
static void runWaveFieldPropagation(){
    initialiseDynamicFields();
    for(int it = 0; it < nt; it++){
        if(mode == 0){
            // snapshot the displacement field BEFORE this update; isfe counts
            // backwards so index 0 holds the last snapshot
            if((it + 1) % dat::sfe == 0){
                int isfe = dat::nsfe - (it + 1) / dat::sfe;
                if(sh){
                    mat::copyDeviceToHost(dat::uy_forward[isfe], dat::uy, nx, nz);
                }
                if(psv){
                    mat::copyDeviceToHost(dat::ux_forward[isfe], dat::ux, nx, nz);
                    mat::copyDeviceToHost(dat::uz_forward[isfe], dat::uz, nx, nz);
                }
            }
        }
        // stress divergence -> acceleration terms
        if(sh){
            hipLaunchKernelGGL(( divSY), dim3(nxb), dim3(nzt), 0, 0, dat::dsy, dat::sxy, dat::szy, dx, dz, nx, nz);
        }
        if(psv){
            hipLaunchKernelGGL(( divSXZ), dim3(nxb), dim3(nzt), 0, 0, dat::dsx, dat::dsz, dat::sxx, dat::szz, dat::sxz, dx, dz, nx, nz);
        }
        // source injection: regular sources (forward) or adjoint sources at
        // the receivers (adjoint; isrc argument -1 means "all")
        if(mode == 0){
            hipLaunchKernelGGL(( addSTF), dim3(nsrc), dim3(1), 0, 0,
                dat::dsx, dat::dsy, dat::dsz, dat::stf_x, dat::stf_y, dat::stf_z,
                dat::src_x_id, dat::src_z_id, dat::isrc, sh, psv, it
            );
        }
        else if(mode == 1){
            hipLaunchKernelGGL(( addSTF), dim3(nrec), dim3(1), 0, 0,
                dat::dsx, dat::dsy, dat::dsz, dat::adstf_x, dat::adstf_y, dat::adstf_z,
                dat::rec_x_id, dat::rec_z_id, -1, sh, psv, it
            );
        }
        // velocity update, velocity gradients, stress update, displacement update
        if(sh){
            hipLaunchKernelGGL(( updateV), dim3(nxb), dim3(nzt), 0, 0, dat::vy, dat::dsy, dat::rho, dat::absbound, dt);
            hipLaunchKernelGGL(( divVY), dim3(nxb), dim3(nzt), 0, 0, dat::dvydx, dat::dvydz, dat::vy, dx, dz, nx, nz);
            hipLaunchKernelGGL(( updateSY), dim3(nxb), dim3(nzt), 0, 0, dat::sxy, dat::szy, dat::dvydx, dat::dvydz, dat::mu, dt);
            hipLaunchKernelGGL(( updateU), dim3(nxb), dim3(nzt), 0, 0, dat::uy, dat::vy, dt);
        }
        if(psv){
            hipLaunchKernelGGL(( updateV), dim3(nxb), dim3(nzt), 0, 0, dat::vx, dat::dsx, dat::rho, dat::absbound, dt);
            hipLaunchKernelGGL(( updateV), dim3(nxb), dim3(nzt), 0, 0, dat::vz, dat::dsz, dat::rho, dat::absbound, dt);
            hipLaunchKernelGGL(( divVXZ), dim3(nxb), dim3(nzt), 0, 0, dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::vx, dat::vz, dx, dz, nx, nz);
            hipLaunchKernelGGL(( updateSXZ), dim3(nxb), dim3(nzt), 0, 0, dat::sxx, dat::szz, dat::sxz, dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::lambda, dat::mu, dt);
            hipLaunchKernelGGL(( updateU), dim3(nxb), dim3(nzt), 0, 0, dat::ux, dat::vx, dt);
            hipLaunchKernelGGL(( updateU), dim3(nxb), dim3(nzt), 0, 0, dat::uz, dat::vz, dt);
        }
        if(mode == 0){
            // record receiver data: velocity (0), displacement (1), or the
            // per-source observed-displacement cube (2, synthetic data run)
            if(dat::obs_type == 0){
                hipLaunchKernelGGL(( saveV), dim3(nrec), dim3(1), 0, 0,
                    dat::v_rec_x, dat::v_rec_y, dat::v_rec_z, dat::vx, dat::vy, dat::vz,
                    dat::rec_x_id, dat::rec_z_id, sh, psv, it
                );
            }
            else if(dat::obs_type == 1){
                hipLaunchKernelGGL(( saveV), dim3(nrec), dim3(1), 0, 0,
                    dat::v_rec_x, dat::v_rec_y, dat::v_rec_z, dat::ux, dat::uy, dat::uz,
                    dat::rec_x_id, dat::rec_z_id, sh, psv, it
                );
            }
            else if(dat::obs_type == 2 && dat::isrc >= 0){
                hipLaunchKernelGGL(( saveV), dim3(nrec), dim3(1), 0, 0,
                    dat::u_obs_x, dat::u_obs_y, dat::u_obs_z, dat::ux, dat::uy, dat::uz,
                    dat::rec_x_id, dat::rec_z_id, dat::isrc, sh, psv, it
                );
            }
            // velocity snapshot, paired with the displacement snapshot above
            if((it + 1) % dat::sfe == 0){
                int isfe = dat::nsfe - (it + 1) / dat::sfe;
                if(sh){
                    mat::copyDeviceToHost(dat::vy_forward[isfe], dat::vy, nx, nz);
                }
                if(psv){
                    mat::copyDeviceToHost(dat::vx_forward[isfe], dat::vx, nx, nz);
                    mat::copyDeviceToHost(dat::vz_forward[isfe], dat::vz, nx, nz);
                }
            }
        }
        else if(mode == 1){
            // kernel accumulation every sfe-th step; the condition is
            // equivalent to (it % sfe == 0). The dsi buffers are reused as
            // staging space for the stored forward snapshots.
            if((it + dat::sfe) % dat::sfe == 0){
                // dsi -> ui_fw -> vi_fw
                int isfe = (it + dat::sfe) / dat::sfe - 1;
                float tsfe = dat::sfe * dt;
                if(sh){
                    mat::copyHostToDevice(dat::dsy, dat::uy_forward[isfe], nx, nz);
                    hipLaunchKernelGGL(( divVY), dim3(nxb), dim3(nzt), 0, 0, dat::dvydx, dat::dvydz, dat::uy, dx, dz, nx, nz);
                    hipLaunchKernelGGL(( divVY), dim3(nxb), dim3(nzt), 0, 0, dat::dvydx_fw, dat::dvydz_fw, dat::dsy, dx, dz, nx, nz);
                    mat::copyHostToDevice(dat::dsy, dat::vy_forward[isfe], nx, nz);
                    hipLaunchKernelGGL(( interactionRhoY), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::vy, dat::dsy, tsfe);
                    hipLaunchKernelGGL(( interactionMuY), dim3(nxb), dim3(nzt), 0, 0, dat::K_mu, dat::dvydx, dat::dvydx_fw, dat::dvydz, dat::dvydz_fw, tsfe);
                }
                if(psv){
                    mat::copyHostToDevice(dat::dsx, dat::ux_forward[isfe], nx, nz);
                    mat::copyHostToDevice(dat::dsz, dat::uz_forward[isfe], nx, nz);
                    hipLaunchKernelGGL(( divVXZ), dim3(nxb), dim3(nzt), 0, 0,
                        dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz,
                        dat::ux, dat::uz, dx, dz, nx, nz
                    );
                    hipLaunchKernelGGL(( divVXZ), dim3(nxb), dim3(nzt), 0, 0,
                        dat::dvxdx_fw, dat::dvxdz_fw, dat::dvzdx_fw, dat::dvzdz_fw,
                        dat::dsx, dat::dsz, dx, dz, nx, nz
                    );
                    mat::copyHostToDevice(dat::dsx, dat::vx_forward[isfe], nx, nz);
                    mat::copyHostToDevice(dat::dsz, dat::vz_forward[isfe], nx, nz);
                    hipLaunchKernelGGL(( interactionRhoXZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::vx, dat::dsx, dat::vz, dat::dsz, tsfe);
                    hipLaunchKernelGGL(( interactionMuXZ), dim3(nxb), dim3(nzt), 0, 0,
                        dat::K_mu, dat::dvxdx, dat::dvxdx_fw, dat::dvxdz, dat::dvxdz_fw,
                        dat::dvzdx, dat::dvzdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe
                    );
                    hipLaunchKernelGGL(( interactionLambdaXZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_lambda, dat::dvxdx, dat::dvxdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe);
                }
            }
        }
    }
}
// Derive launch configuration and grid spacing from the imported parameters
// and allocate every device/host buffer the simulation needs. When `adjoint`
// is non-zero, the additional buffers for kernel computation and adjoint
// sources are allocated as well. Also precomputes source/receiver grid
// indices, the absorbing-boundary taper, and writes the time axis to disk.
static void checkArgs(int adjoint){
    dat::nxb = dim3(nx, nbt);
    dat::nzt = dim3(nz / nbt);
    // round nt up/down to a multiple of sfe so snapshots divide the run evenly
    if(nt % dat::sfe != 0){
        nt = dat::sfe * (int)((float)nt / dat::sfe + 0.5);
    }
    dat::nsfe = nt / dat::sfe;
    dat::dx = dat::Lx / (nx - 1);
    dat::dz = dat::Lz / (nz - 1);
    // SH (anti-plane) wavefield buffers
    if(sh){
        dat::vy = mat::create(nx, nz);
        dat::uy = mat::create(nx, nz);
        dat::sxy = mat::create(nx, nz);
        dat::szy = mat::create(nx, nz);
        dat::dsy = mat::create(nx, nz);
        dat::dvydx = mat::create(nx, nz);
        dat::dvydz = mat::create(nx, nz);
        dat::v_rec_y = mat::create(nrec, nt);
        dat::uy_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::vy_forward = mat::createHost(dat::nsfe, nx, nz);
    }
    // P-SV (in-plane) wavefield buffers
    if(psv){
        dat::vx = mat::create(nx, nz);
        dat::vz = mat::create(nx, nz);
        dat::ux = mat::create(nx, nz);
        dat::uz = mat::create(nx, nz);
        dat::sxx = mat::create(nx, nz);
        dat::szz = mat::create(nx, nz);
        dat::sxz = mat::create(nx, nz);
        dat::dsx = mat::create(nx, nz);
        dat::dsz = mat::create(nx, nz);
        dat::dvxdx = mat::create(nx, nz);
        dat::dvxdz = mat::create(nx, nz);
        dat::dvzdx = mat::create(nx, nz);
        dat::dvzdz = mat::create(nx, nz);
        dat::v_rec_x = mat::create(nrec, nt);
        dat::v_rec_z = mat::create(nrec, nt);
        dat::ux_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::uz_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::vx_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::vz_forward = mat::createHost(dat::nsfe, nx, nz);
    }
    // model fields and source time functions
    dat::absbound = mat::create(nx, nz);
    dat::lambda = mat::create(nx, nz);
    dat::rho = mat::create(nx, nz);
    dat::mu = mat::create(nx, nz);
    dat::stf_x = mat::create(nsrc, nt);
    dat::stf_y = mat::create(nsrc, nt);
    dat::stf_z = mat::create(nsrc, nt);
    // extra buffers only needed for the adjoint/kernel computation
    if(adjoint){
        if(sh){
            dat::dvydx_fw = mat::create(nx, nz);
            dat::dvydz_fw = mat::create(nx, nz);
        }
        if(psv){
            dat::dvxdx_fw = mat::create(nx, nz);
            dat::dvxdz_fw = mat::create(nx, nz);
            dat::dvzdx_fw = mat::create(nx, nz);
            dat::dvzdz_fw = mat::create(nx, nz);
        }
        dat::K_lambda = mat::create(nx, nz);
        dat::K_mu = mat::create(nx, nz);
        dat::K_rho = mat::create(nx, nz);
        dat::adstf_x = mat::create(nrec, nt);
        dat::adstf_y = mat::create(nrec, nt);
        dat::adstf_z = mat::create(nrec, nt);
    }
    // snap source/receiver coordinates to the nearest grid indices
    dat::src_x_id = mat::createInt(nsrc);
    dat::src_z_id = mat::createInt(nsrc);
    dat::rec_x_id = mat::createInt(nrec);
    dat::rec_z_id = mat::createInt(nrec);
    hipLaunchKernelGGL(( computeIndices), dim3(nsrc), dim3(1), 0, 0, dat::src_x_id, dat::src_x, dat::Lx, nx);
    hipLaunchKernelGGL(( computeIndices), dim3(nsrc), dim3(1), 0, 0, dat::src_z_id, dat::src_z, dat::Lz, nz);
    hipLaunchKernelGGL(( computeIndices), dim3(nrec), dim3(1), 0, 0, dat::rec_x_id, dat::rec_x, dat::Lx, nx);
    hipLaunchKernelGGL(( computeIndices), dim3(nrec), dim3(1), 0, 0, dat::rec_z_id, dat::rec_z, dat::Lz, nz);
    hipLaunchKernelGGL(( initialiseAbsorbingBoundaries), dim3(nxb), dim3(nzt), 0, 0,
        dat::absbound, dat::absorb_width,
        dat::absorb_left, dat::absorb_right, dat::absorb_bottom, dat::absorb_top,
        dat::Lx, dat::Lz, dx, dz
    );
    // write the time axis for post-processing
    // NOTE(review): t is never freed — small one-off leak, confirm intended
    float *t = mat::createHost(nt);
    for(int it = 0; it < nt; it++){
        t[it] = it * dt;
    }
    mat::write(t, nt, "t");
}
// Run one forward simulation for source `isrc` (a negative value injects all
// sources at once). Receiver traces / snapshots are left in the dat:: buffers.
static void runForward(int isrc){
    dat::simulation_mode = 0;
    dat::isrc = isrc;
    runWaveFieldPropagation();
    // debugging output, kept for reference:
    // float **v_rec_x=mat::createHost(dat::nrec, dat::nt);
    // float **v_rec_z=mat::createHost(dat::nrec, dat::nt);
    // mat::copyDeviceToHost(v_rec_x, dat::v_rec_x, dat::nrec, dat::nt);
    // mat::copyDeviceToHost(v_rec_z, dat::v_rec_z, dat::nrec, dat::nt);
    // mat::write(v_rec_x, dat::nrec, dat::nt, "vx_rec");
    // mat::write(v_rec_z, dat::nrec, dat::nt, "vz_rec");
    // mat::write(dat::vx_forward, dat::nsfe, dat::nx, dat::nz, "vx");
    // mat::write(dat::vz_forward, dat::nsfe, dat::nx, dat::nz, "vz");
}
// Run one adjoint simulation, accumulating the sensitivity kernels in
// dat::K_*. When init_kernel is non-zero the kernel accumulators are zeroed
// first; pass 0 to sum contributions over multiple sources.
static void runAdjoint(int init_kernel){
    dat::simulation_mode = 1;
    if(init_kernel){
        initialiseKernels();
    }
    runWaveFieldPropagation();
    // debugging output, kept for reference:
    // float **rho = mat::createHost(dat::nx, dat::nz);
    // float **mu = mat::createHost(dat::nx, dat::nz);
    // float **lambda = mat::createHost(dat::nx, dat::nz);
    // mat::copyDeviceToHost(rho, dat::K_rho, dat::nx, dat::nz);
    // mat::copyDeviceToHost(mu, dat::K_mu, dat::nx, dat::nz);
    // mat::copyDeviceToHost(lambda, dat::K_lambda, dat::nx, dat::nz);
    // mat::write(rho, dat::nx, dat::nz, "rho");
    // mat::write(mu, dat::nx, dat::nz, "mu");
    // mat::write(lambda, dat::nx, dat::nz, "lambda");
    // mat::write(dat::vx_forward, dat::nsfe, dat::nx, dat::nz, "vx");
    // mat::write(dat::vz_forward, dat::nsfe, dat::nx, dat::nz, "vz");
}
// Compute the data misfit over all sources and receivers; when `kernel` is
// non-zero, also compute the (normalised, Gaussian-smoothed) sensitivity
// kernels via adjoint simulations. Returns the misfit normalised by the
// reference misfit dat::misfit_ref (set on the first kernel computation).
static float computeKernels(int kernel){
    float *d_misfit = mat::create(nt);
    float *h_misfit = mat::createHost(nt);
    mat::init(d_misfit, nt, 0);
    if(kernel){
        initialiseKernels();
    }
    for(int isrc = 0; isrc < nsrc; isrc++){
        runForward(isrc);
        // accumulate the per-time-step misfit of every receiver (x and z
        // components; the taper dat::tw windows the traces)
        for(int irec = 0; irec < nrec; irec++){
            hipLaunchKernelGGL(( calculateMisfit), dim3(nt), dim3(1), 0, 0, d_misfit, dat::v_rec_x, dat::u_obs_x, dat::tw, dt, isrc, irec);
            hipLaunchKernelGGL(( calculateMisfit), dim3(nt), dim3(1), 0, 0, d_misfit, dat::v_rec_z, dat::u_obs_z, dat::tw, dt, isrc, irec);
        }
        if(kernel){
            // build the adjoint sources from the data residuals, then sum
            // this source's kernel contribution (runAdjoint(0): no re-init)
            hipLaunchKernelGGL(( prepareAdjointSTF), dim3(nt), dim3(nrec), 0, 0, dat::adstf_x, dat::v_rec_x, dat::u_obs_x, dat::tw, nt, isrc);
            hipLaunchKernelGGL(( prepareAdjointSTF), dim3(nt), dim3(nrec), 0, 0, dat::adstf_z, dat::v_rec_z, dat::u_obs_z, dat::tw, nt, isrc);
            mat::init(dat::adstf_y, nrec, nt, 0);
            runAdjoint(0);
        }
    }
    // reduce the per-time-step misfit on the host
    mat::copyDeviceToHost(h_misfit, d_misfit, nt);
    float misfit = 0;
    for(int i = 0; i< nt; i++){
        misfit += h_misfit[i];
    }
    free(h_misfit);
    hipFree(d_misfit);
    if(kernel){
        // first call defines the normalisation reference
        if(dat::misfit_ref < 0){
            dat::misfit_ref = misfit;
        }
        hipLaunchKernelGGL(( normKernel), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::rho_ref, dat::misfit_ref);
        hipLaunchKernelGGL(( normKernel), dim3(nxb), dim3(nzt), 0, 0, dat::K_mu, dat::mu_ref, dat::misfit_ref);
        hipLaunchKernelGGL(( normKernel), dim3(nxb), dim3(nzt), 0, 0, dat::K_lambda, dat::lambda_ref, dat::misfit_ref);
        // separable Gaussian smoothing: x pass into gtemp, z pass back
        hipLaunchKernelGGL(( filterKernelX), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::gtemp, nx, dat::sigma);
        hipLaunchKernelGGL(( filterKernelZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_rho, dat::gtemp, dat::gsum, nz, dat::sigma);
        hipLaunchKernelGGL(( filterKernelX), dim3(nxb), dim3(nzt), 0, 0, dat::K_mu, dat::gtemp, nx, dat::sigma);
        hipLaunchKernelGGL(( filterKernelZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_mu, dat::gtemp, dat::gsum, nz, dat::sigma);
        hipLaunchKernelGGL(( filterKernelX), dim3(nxb), dim3(nzt), 0, 0, dat::K_lambda, dat::gtemp, nx, dat::sigma);
        hipLaunchKernelGGL(( filterKernelZ), dim3(nxb), dim3(nzt), 0, 0, dat::K_lambda, dat::gtemp, dat::gsum, nz, dat::sigma);
    }
    return misfit / dat::misfit_ref;
}
// Return the largest absolute value of a[0..n-1] (n must be >= 1).
// The maximum is tracked in double precision but returned as float,
// matching the precision of the step-length bookkeeping that uses it.
static float findMaxAbs(double *a, int n){
    double best = fabs(a[0]);
    for(int k = 1; k < n; k++){
        double v = fabs(a[k]);
        if(v > best){
            best = v;
        }
    }
    return best;
}
// Apply a trial model update with step length `step`. Only the density model
// is updated here; the lambda/mu updates are deliberately disabled (this is
// a density-only inversion). step_prev is forwarded to the kernel —
// presumably so updateModel can revert the previous trial step before
// applying the new one (updateModel is defined elsewhere; TODO confirm).
// Returns `step` so callers can track the last applied step length.
static float updateModels(float step, float step_prev){
    //hipLaunchKernelGGL(( updateModel), dim3(nxb), dim3(nzt), 0, 0, dat::lambda, dat::K_lambda, step, step_prev);
    //hipLaunchKernelGGL(( updateModel), dim3(nxb), dim3(nzt), 0, 0, dat::mu, dat::K_mu, step, step_prev);
    hipLaunchKernelGGL(( updateModel), dim3(nxb), dim3(nzt), 0, 0, dat::rho, dat::K_rho, step, step_prev);
    return step;
}
// Quadratic line search: evaluate the misfit at a few trial step lengths,
// fit a parabola (polyfit), and take its minimiser as the step. If the fit
// is poor (negative curvature, negative minimiser, or large residual) the
// bracket is shrunk/expanded and the fit retried up to 5 times.
// The model is left updated with the returned step length.
static float calculateStepLength(float teststep, float misfit, int iter){
    // 5 samples on the first iteration, 3 afterwards
    int nsteps = iter?3:5;
    double *stepInArray = mat::createDoubleHost(nsteps);
    double *misfitArray = mat::createDoubleHost(nsteps);
    double *p = mat::createDoubleHost(3);
    // trial steps equally spaced in [0, 2*teststep]
    for(int i = 0; i < nsteps; i++){
        stepInArray[i] = 2 * i * teststep / (nsteps - 1);
    }
    misfitArray[0] = misfit;
    double minmisfit = misfit;
    double maxmisfit = misfit;
    int n_prev = nsteps;
    double *stepInArray_prev = NULL;
    double *misfitArray_prev = NULL;
    double *stepInArray_new = NULL;
    double *misfitArray_new = NULL;
    double step_prev = stepInArray[0];
    // evaluate the misfit at each nonzero trial step (each updateModels call
    // replaces the previous trial update via step_prev)
    for(int i = 1; i < nsteps; i++){
        step_prev = updateModels(stepInArray[i], step_prev);
        misfitArray[i] = computeKernels(0);
        if(misfitArray[i] < minmisfit){
            minmisfit = misfitArray[i];
        }
        if(misfitArray[i] > maxmisfit){
            maxmisfit = misfitArray[i];
        }
    }
    // parabola fit; minimiser at -p1/(2*p0)
    // NOTE(review): divides by (maxmisfit - minmisfit) and by p[0] below
    // without guarding against zero — confirm inputs cannot be degenerate
    double rss = polyfit(stepInArray, misfitArray, p, nsteps);
    double step = -p[1] / (2 * p[0]);
    double fitGoodness = rss / (maxmisfit - minmisfit);
    double minval=p[0]*step*step+p[1]*step+p[2];
    printf("p = [%f, %f, %f]\n",p[0],p[1],p[2]);
    printf("s = [%f, %f, %f, %f, %f]\n",stepInArray[0],stepInArray[1],stepInArray[2],nsteps==3?0:stepInArray[3],nsteps==3?0:stepInArray[4]);
    printf("m = [%f, %f, %f, %f, %f]\n",misfitArray[0],misfitArray[1],misfitArray[2],nsteps==3?0:misfitArray[3],nsteps==3?0:misfitArray[4]);
    printf("step=%e rss=%e fg=%e misfit=%f minval=%f\n",step,rss, fitGoodness,misfit, minval);
    // retry loop: rebuild a 3-point bracket and refit while the parabola is
    // unusable (concave, negative step, or poor fit), at most 5 extra times
    int nextra = 0;
    int idxEmpty;
    while((p[0] < 0 || step < 0 || fitGoodness > 0.1) && nextra < 5){
        if(nextra == 0){
            // keep a copy of the original samples for bracket construction
            stepInArray_prev = mat::createDoubleHost(nsteps);
            misfitArray_prev = mat::createDoubleHost(nsteps);
            for(int i = 0; i < nsteps; i++){
                stepInArray_prev[i] = stepInArray[i];
                misfitArray_prev[i] = misfitArray[i];
            }
        }
        stepInArray_new = mat::createDoubleHost(3);
        misfitArray_new = mat::createDoubleHost(3);
        stepInArray_new[0] = 0;
        misfitArray_new[0] = misfitArray_prev[0];
        if(p[0] < 0 && step < 0){
            // misfit still decreasing: expand the bracket outwards
            stepInArray_new[1] = stepInArray_prev[n_prev - 1];
            stepInArray_new[2] = 2 * findMaxAbs(stepInArray_prev, n_prev);
            misfitArray_new[1] = misfitArray_prev[n_prev - 1];
            idxEmpty = 2;
        }
        else{
            // otherwise shrink the bracket towards zero
            stepInArray_new[1] = stepInArray_prev[1] / 3;
            stepInArray_new[2] = stepInArray_prev[1];
            misfitArray_new[2] = misfitArray_prev[1];
            idxEmpty = 1;
        }
        // only one new misfit evaluation is needed per retry
        step_prev = updateModels(stepInArray_new[idxEmpty], step_prev);
        misfitArray_new[idxEmpty] = computeKernels(0);
        rss = polyfit(stepInArray_new, misfitArray_new, p, 3);
        step = -p[1] / (2 * p[0]);
        fitGoodness = rss / (maxmisfit - minmisfit);
        double minval=p[0]*step*step+p[1]*step+p[2];
        printf("\np = [%f, %f, %f]\n",p[0],p[1],p[2]);
        printf("s = [%f, %f, %f]\n",stepInArray_new[0],stepInArray_new[1],stepInArray_new[2]);
        printf("m = [%f, %f, %f]\n",misfitArray_new[0],misfitArray_new[1],misfitArray_new[2]);
        printf("step=%e rss=%e fg=%e misfit=%f minval=%f\n",step,rss, fitGoodness,misfit, minval);
        nextra++;
        n_prev = 3;
        // _prev now aliases _new for the next retry (old _prev freed here,
        // so no double free occurs at the end of the function)
        free(stepInArray_prev);
        free(misfitArray_prev);
        stepInArray_prev = stepInArray_new;
        misfitArray_prev = misfitArray_new;
    }
    printf("\n\n\n");
    free(p);
    free(stepInArray);
    free(misfitArray);
    free(stepInArray_new);
    free(misfitArray_new);
    // apply the chosen step (replacing the last trial update)
    return updateModels(step, step_prev);
}
// Driver of the iterative inversion. Sets up taper weights, Gaussian filter
// buffers and normalisation references, then runs `niter` iterations of
// either a nonlinear conjugate-gradient scheme (optimization_method 1:
// Fletcher-Reeves, 2: Polak-Ribiere) or plain steepest descent with a line
// search. Model and kernel snapshots of every iteration are written to disk.
static void inversionRoutine(){
    hipblasCreate(&cublas_handle);
    hipsolverDnCreate(&solver_handle);
    int niter = 20;
    float step = 0.004;
    // host staging buffers used to dump device fields to disk
    float **lambda = mat::createHost(nx,nz);
    float **mu = mat::createHost(nx,nz);
    float **rho = mat::createHost(nx,nz);
    { // later
        mat::copyDeviceToHost(rho, dat::rho, nx, nz);
        mat::copyDeviceToHost(mu, dat::mu, nx, nz);
        mat::copyDeviceToHost(lambda, dat::lambda, nx, nz);
        mat::write(rho, nx, nz, "rho0");
        mat::write(mu, nx, nz, "mu0");
        mat::write(lambda, nx, nz, "lambda0");
    }
    // taper weights
    dat::tw = mat::create(nt);
    hipLaunchKernelGGL(( getTaperWeights), dim3(nt), dim3(1), 0, 0, dat::tw, dt, nt);
    // gaussian filter
    dat::sigma = 2;
    dat::gsum = mat::create(nx, nz);
    dat::gtemp = mat::create(nx, nz);
    hipLaunchKernelGGL(( initialiseGaussian), dim3(nxb), dim3(nzt), 0, 0, dat::gsum, nx, nz, dat::sigma);
    // adjoint related parameters
    dat::obs_type = 1;
    dat::misfit_ref = -1;
    dat::K_lambda_ref = -1;
    dat::K_mu_ref = -1;
    dat::K_rho_ref = -1;
    int &opm = dat::optimization_method;
    if(opm == 1 || opm == 2){
        // nonlinear conjugate gradient on the density model
        float **g = mat::create(nx, nz);    // current gradient
        float **h = mat::create(nx, nz);    // current search direction
        float **gpr = mat::create(nx, nz);  // previous gradient
        float misfit = computeKernels(1);
        // NOTE(review): the gradient is taken from dat::rho, and later the
        // search direction is copied back into dat::rho before the line
        // search — this relies on how updateModel (defined elsewhere) uses
        // these buffers; verify before changing.
        mat::copy(g, dat::rho, nx, nz);
        float n2g = mat::norm(g, nx, nz);
        float gg = n2g * n2g;
        float gam = 0;
        mat::init(h, nx, nz, 0);
        float nh = 0;
        // reduce hipMalloc: later
        for(int iter = 0; iter < niter; iter++){
            printf("iter = %d\n", iter + 1);
            float ggpr = gg;
            mat::copy(gpr, g, nx, nz);
            // h = gam * h + g
            mat::calc(h, gam, h, 1, g, nx, nz);
            // restart with steepest descent if h is (nearly) orthogonal to g
            if (mat::dot(g, h, nx, nz) <= 1e-3 * n2g * nh) {
                mat::copy(h, g, nx, nz);
                nh = n2g;
            }
            else{
                nh = mat::norm(h, nx, nz);
            }
            mat::copy(dat::rho, h, nx, nz);
            step = calculateStepLength(step, misfit, iter);
            misfit = computeKernels(1);
            mat::copy(g, dat::rho, nx, nz);
            n2g = mat::norm(g, nx, nz);
            gg = n2g * n2g;
            if(opm == 1){
                // Fletcher-Reeves
                gam = gg / ggpr;
            }
            else{
                // Polak-Ribiere: gam = <g - gpr, g> / <gpr, gpr>
                mat::calc(gpr, 1, g, -1, gpr, nx, nz);
                gam = mat::dot(gpr, g, nx, nz) / ggpr;
            }
            { // later: dump the model and kernel of this iteration
                char lname[10], mname[10], rname[10];
                char lname2[10], mname2[10], rname2[10];
                sprintf(lname, "lambda%d", iter + 1);
                sprintf(lname2, "klambda%d", iter + 1);
                sprintf(mname, "mu%d", iter + 1);
                sprintf(mname2, "kmu%d", iter + 1);
                sprintf(rname, "rho%d", iter + 1);
                sprintf(rname2, "krho%d", iter + 1);
                mat::copyDeviceToHost(rho, dat::rho, nx, nz);
                mat::copyDeviceToHost(mu, dat::mu, nx, nz);
                mat::copyDeviceToHost(lambda, dat::lambda, nx, nz);
                mat::write(rho, nx, nz, rname);
                mat::write(mu, nx, nz, mname);
                mat::write(lambda, nx, nz, lname);
                mat::copyDeviceToHost(rho, dat::K_rho, nx, nz);
                mat::copyDeviceToHost(mu, dat::K_mu, nx, nz);
                mat::copyDeviceToHost(lambda, dat::K_lambda, nx, nz);
                mat::write(rho, nx, nz, rname2);
                mat::write(mu, nx, nz, mname2);
                mat::write(lambda, nx, nz, lname2);
            }
        }
    }
    else{
        // steepest descent: new kernel + line search each iteration
        for(int iter = 0; iter < niter; iter++){
            printf("iter = %d\n", iter + 1);
            float misfit = computeKernels(1);
            step = calculateStepLength(step, misfit, iter);
        }
    }
    // release the host staging buffers (data block first, then row pointers)
    free(*lambda);
    free(lambda);
    free(*mu);
    free(mu);
    free(*rho);
    free(rho);
    hipblasDestroy(cublas_handle);
    hipsolverDnDestroy(solver_handle);
}
// Full synthetic-inversion test: generate "observed" data with the true
// model (model_type 13), then invert starting from the homogeneous
// background model (model_type 10).
static void runSyntheticInvertion(){
    checkArgs(1);
    dat::obs_type = 2; // save displacement per source
    dat::model_type = 13; // true model: later
    prepareSTF(); // dat::use_given_stf, sObsPerFreq: later
    defineMaterialParameters(); // dat::use_given_model: later
    // observed displacement cube [source][receiver][time]
    dat::u_obs_x = mat::create(nsrc, nrec, nt);
    dat::u_obs_z = mat::create(nsrc, nrec, nt);
    for(int isrc = 0; isrc < nsrc; isrc++){
        runForward(isrc);
    }
    // switch to the starting model and its normalisation references
    dat::model_type = 10;
    dat::rho_ref = 2600;
    dat::mu_ref = 2.66e10;
    dat::lambda_ref = 3.42e10;
    dat::optimization_method = 2;
    defineMaterialParameters();
    inversionRoutine();
}
// Entry point. With no arguments the full synthetic inversion test is run;
// otherwise each "run_forward" argument triggers a single forward
// simulation with all sources injected.
int main(int argc , char *argv[]){
    importData();
    if(argc == 1){
        runSyntheticInvertion();
    }
    else{
        for(int iarg = 1; iarg < argc; iarg++){
            if(strcmp(argv[iarg], "run_forward") != 0){
                continue;
            }
            checkArgs(0);
            prepareSTF();
            defineMaterialParameters();
            runForward(-1);
        }
    }
    checkMemoryUsage();
    return 0;
}
| df62ae2551b4799bbf4b096032e9c5e2bad08afc.cu | // ************************************************************************
// * Cuda based 2D elastic wave forward modeling and adjoint inversion *
// * **********************************************************************
// * Author: Congyue Cui *
// * https://github.com/congyue/cufdm_inv *
// * *
// * FDM calculation modified from *
// * https://github.com/Phlos/fd2d-adjoint *
// * by *
// * Nienke Blom *
// * Christian Boehm *
// * Andreas Fichtner *
// * *
// * JSON input file parsed by ArduinoJson *
// * https://github.com/bblanchon/ArduinoJson *
// * *
// ************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "ArduinoJson.h"
// devij: shorthand used by every field kernel to map the launch coordinates
// onto a grid point (i = x index from blockIdx.x, j = z index from the
// thread/block-y coordinates).
#define devij int i = blockIdx.x, j = threadIdx.x + blockIdx.y * blockDim.x
const float pi = 3.1415927;
// nbt: number of thread blocks along z used when launching 2D field kernels
const int nbt = 1;
__constant__ float d_pi = 3.1415927;
// library handles shared by the whole program (created by the host driver)
cublasHandle_t cublas_handle;
cusolverDnHandle_t solver_handle;
// Global simulation state. Pointers are device memory unless marked "host".
namespace dat{
    // grid geometry and time stepping
    int nx;
    int nz;
    int nt;
    float dx;
    float dz;
    float dt;
    float Lx;
    float Lz;
    // launch configuration for nx-by-nz field kernels
    dim3 nxb;
    dim3 nzt;
    // simulation control flags and parameters
    int sfe;
    int nsfe;
    int order;
    int model_type;
    int wave_propagation_sh;
    int wave_propagation_psv;
    int simulation_mode;
    int use_given_model;
    int use_given_stf;
    float source_amplitude;
    // absorbing boundary configuration
    int absorb_left;
    int absorb_right;
    int absorb_top;
    int absorb_bottom;
    float absorb_width;
    // sources and receivers
    int isrc;
    int nsrc;
    int nrec;
    int obs_type;
    float *tw;
    int *stf_type; // host
    float *stf_PSV_x; // host
    float *stf_PSV_z; // host
    float *tauw_0; // host
    float *tauw; // host
    float *tee_0; // host
    float *f_min; // host
    float *f_max; // host
    float *src_x;
    float *src_z;
    float *rec_x;
    float *rec_z;
    int *src_x_id;
    int *src_z_id;
    int *rec_x_id;
    int *rec_z_id;
    // source time functions (regular and adjoint)
    float **stf_x;
    float **stf_y;
    float **stf_z;
    float **adstf_x;
    float **adstf_y;
    float **adstf_z;
    // material model and boundary taper
    float **lambda;
    float **mu;
    float **rho;
    float **absbound;
    // dynamic wavefields: displacement, velocity, stress
    float **ux;
    float **uy;
    float **uz;
    float **vx;
    float **vy;
    float **vz;
    float **sxx;
    float **sxy;
    float **sxz;
    float **szy;
    float **szz;
    // stress divergence and velocity gradients (work buffers)
    float **dsx;
    float **dsy;
    float **dsz;
    float **dvxdx;
    float **dvxdz;
    float **dvydx;
    float **dvydz;
    float **dvzdx;
    float **dvzdz;
    // forward-field gradients used during kernel accumulation
    float **dvxdx_fw;
    float **dvxdz_fw;
    float **dvydx_fw;
    float **dvydz_fw;
    float **dvzdx_fw;
    float **dvzdz_fw;
    // sensitivity kernels
    float **K_lambda;
    float **K_mu;
    float **K_rho;
    // receiver traces and observed data
    float **v_rec_x;
    float **v_rec_y;
    float **v_rec_z;
    float ***u_obs_x;
    float ***u_obs_y;
    float ***u_obs_z;
    // forward wavefield snapshots (every sfe-th step, reverse time order)
    float ***ux_forward; // host
    float ***uy_forward; // host
    float ***uz_forward; // host
    float ***vx_forward; // host
    float ***vy_forward; // host
    float ***vz_forward; // host
    // inversion configuration and work buffers
    int optimization_method;
    int sigma;
    float **gsum;
    float **gtemp;
    float misfit_ref;
    float lambda_ref;
    float mu_ref;
    float rho_ref;
    float K_lambda_ref;
    float K_rho_ref;
    float K_mu_ref;
    float **lambda_in;
    float **mu_in;
    float **rho_in;
}
// Small device-matrix utility layer. A "2D matrix" is a device array of row
// pointers into one contiguous device data block (mat[0] is the base of the
// data), so fields can be indexed as mat[i][j] inside kernels while still
// being copyable with a single cudaMemcpy.
namespace mat{
    // ---- element-wise device kernels -----------------------------------
    __global__ void _setValue(float *mat, const float init){
        int i = blockIdx.x;
        mat[i] = init;
    }
    __global__ void _setValue(double *mat, const double init){
        int i = blockIdx.x;
        mat[i] = init;
    }
    __global__ void _setValue(float **mat, const float init){
        devij;
        mat[i][j] = init;
    }
    __global__ void _setValue(float ***mat, const float init, const int p){
        devij;
        mat[p][i][j] = init;
    }
    // fill the device row-pointer array: row i starts at data + n * i
    __global__ void _setPointerValue(float **mat, float *data, const int n){
        int i = blockIdx.x;
        mat[i] = data + n * i;
    }
    __global__ void _setPointerValue(float ***mat, float **data, const int i){
        mat[i] = data;
    }
    __global__ void _setIndexValue(float *a, float *b, int index){
        a[0] = b[index];
    }
    __global__ void _copy(float **mat, float **init){
        devij;
        mat[i][j] = init[i][j];
    }
    __global__ void _copy(float **mat, float **init, float k){
        devij;
        mat[i][j] = init[i][j] * k;
    }
    // c = ka * a + kb * b
    __global__ void _calc(float **c, float ka, float **a, float kb, float **b){
        devij;
        c[i][j] = ka * a[i][j] + kb * b[i][j];
    }
    // ---- initialisation ------------------------------------------------
    float *init(float *mat, const int m, const float init){
        mat::_setValue<<<m, 1>>>(mat, init);
        return mat;
    }
    double *init(double *mat, const int m, const double init){
        mat::_setValue<<<m, 1>>>(mat, init);
        return mat;
    }
    float **init(float **mat, const int m, const int n, const float init){
        dim3 dimBlock(m, nbt);
        mat::_setValue<<<dimBlock, n / nbt>>>(mat, init);
        return mat;
    }
    float ***init(float ***mat, const int p, const int m, const int n, const float init){
        dim3 dimBlock(m, nbt);
        for(int i = 0; i < p; i++){
            mat::_setValue<<<dimBlock, n / nbt>>>(mat, init, i);
        }
        return mat;
    }
    float *initHost(float *mat, const int m, const float init){
        for(int i = 0; i < m; i++){
            mat[i] = init;
        }
        return mat;
    }
    float **initHost(float **mat, const int m, const int n, const float init){
        for(int i = 0; i < m; i++){
            for(int j = 0; j < n; j++){
                mat[i][j] = init;
            }
        }
        return mat;
    }
    float ***initHost(float ***mat, const int p, const int m, const int n, float init){
        for(int k = 0; k < p; k++){
            for(int i = 0; i < m; i++){
                for(int j = 0; j < n; j++){
                    mat[k][i][j] = init;
                }
            }
        }
        return mat;
    }
    // ---- allocation ----------------------------------------------------
    float *create(const int m) {
        float *data;
        cudaMalloc((void **)&data, m * sizeof(float));
        return data;
    }
    float **create(const int m, const int n){
        // one contiguous data block plus a device array of row pointers
        float *data = mat::create(m * n);
        float **mat;
        cudaMalloc((void **)&mat, m * sizeof(float *));
        mat::_setPointerValue<<<m, 1>>>(mat, data, n);
        return mat;
    }
    float ***create(const int p, const int m, const int n){
        float ***mat;
        cudaMalloc((void **)&mat, p * sizeof(float **));
        for(int i = 0; i < p; i++){
            mat::_setPointerValue<<<1,1>>>(mat, mat::create(m, n), i);
        }
        return mat;
    }
    float *createHost(const int m) {
        return (float *)malloc(m * sizeof(float));
    }
    float **createHost(const int m, const int n){
        // host mirror of the device layout: contiguous data + row pointers
        float *data = mat::createHost(m * n);
        float **mat = (float **)malloc(m * sizeof(float *));
        for(int i =0; i < m; i++){
            mat[i] = data + n * i;
        }
        return mat;
    }
    float ***createHost(const int p, const int m, const int n){
        float ***mat = (float ***)malloc(p * sizeof(float **));
        for(int i = 0; i < p; i++){
            mat[i] = mat::createHost(m, n);
        }
        return mat;
    }
    int *createInt(const int m){
        int *a;
        cudaMalloc((void**)&a, m * sizeof(int));
        return a;
    }
    int *createIntHost(const int m) {
        return (int *)malloc(m * sizeof(int));
    }
    double *createDouble(const int m){
        double *a;
        cudaMalloc((void**)&a, m * sizeof(double));
        return a;
    }
    double *createDoubleHost(const int m) {
        return (double *)malloc(m * sizeof(double));
    }
    // fetch mat[0] (the base of the contiguous device data block)
    float *getDataPointer(float **mat){
        float **p=(float **)malloc(sizeof(float *));
        cudaMemcpy(p, mat , sizeof(float *), cudaMemcpyDeviceToHost);
        float *data = *p;
        free(p);  // was leaked before
        return data;
    }
    // ---- copies --------------------------------------------------------
    void copy(float **mat, float **init, const int m, const int n){
        dim3 dimBlock(m, nbt);
        mat::_copy<<<dimBlock, n / nbt>>>(mat, init);
    }
    void copy(float **mat, float **init, float k, const int m, const int n){
        dim3 dimBlock(m, nbt);
        mat::_copy<<<dimBlock, n / nbt>>>(mat, init, k);
    }
    void copyHostToDevice(float *d_a, const float *a, const int m){
        cudaMemcpy(d_a, a , m * sizeof(float), cudaMemcpyHostToDevice);
    }
    void copyHostToDevice(float **pd_a, float *pa, const int m, const int n){
        // fetch the device data pointer, then copy the whole block at once
        float **phd_a=(float **)malloc(sizeof(float *));
        cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
        cudaMemcpy(*phd_a, pa , m * n * sizeof(float), cudaMemcpyHostToDevice);
        free(phd_a);  // was leaked before
    }
    void copyHostToDevice(float **pd_a, float **pa, const int m, const int n){
        float **phd_a=(float **)malloc(sizeof(float *));
        cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
        cudaMemcpy(*phd_a, *pa , m * n * sizeof(float), cudaMemcpyHostToDevice);
        free(phd_a);  // was leaked before
    }
    void copyHostToDevice(float ***pd_a, float ***pa, const int p, const int m, const int n){
        float ***phd_a=(float ***)malloc(p * sizeof(float **));
        cudaMemcpy(phd_a, pd_a, p * sizeof(float **), cudaMemcpyDeviceToHost);
        for(int i = 0; i < p; i++){
            mat::copyHostToDevice(phd_a[i], pa[i], m, n);
        }
        free(phd_a);  // was leaked before
    }
    void copyDeviceToHost(float *a, const float *d_a, const int m){
        cudaMemcpy(a, d_a , m * sizeof(float), cudaMemcpyDeviceToHost);
    }
    void copyDeviceToHost(float *pa, float **pd_a, const int m, const int n){
        float **phd_a=(float **)malloc(sizeof(float *));
        cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
        cudaMemcpy(pa, *phd_a , m * n * sizeof(float), cudaMemcpyDeviceToHost);
        free(phd_a);  // was leaked before
    }
    void copyDeviceToHost(float **pa, float **pd_a, const int m, const int n){
        float **phd_a=(float **)malloc(sizeof(float *));
        cudaMemcpy(phd_a, pd_a , sizeof(float *), cudaMemcpyDeviceToHost);
        cudaMemcpy(*pa, *phd_a , m * n * sizeof(float), cudaMemcpyDeviceToHost);
        free(phd_a);  // was leaked before
    }
    void copyDeviceToHost(float ***pa, float ***pd_a, const int p, const int m, const int n){
        float ***phd_a=(float ***)malloc(p * sizeof(float **));
        cudaMemcpy(phd_a, pd_a, p * sizeof(float **), cudaMemcpyDeviceToHost);
        for(int i = 0; i < p; i++){
            mat::copyDeviceToHost(pa[i], phd_a[i], m, n);
        }
        free(phd_a);  // was leaked before
    }
    // ---- reductions (via cuBLAS) ---------------------------------------
    void calc(float **c, float ka, float **a, float kb, float **b, int m, int n){
        dim3 dimBlock(m, nbt);
        mat::_calc<<<dimBlock, n / nbt>>>(c, ka, a, kb, b);
    }
    float norm(float *a, int n){
        float norm_a = 0;
        cublasSnrm2_v2(cublas_handle, n, a, 1, &norm_a);
        return norm_a;
    }
    float norm(float **a, int m, int n){
        return mat::norm(mat::getDataPointer(a), m * n);
    }
    // value of the element with the largest magnitude (sign preserved)
    float amax(float *a, int n){
        int index = 0;
        cublasIsamax_v2(cublas_handle, n, a, 1, &index);  // 1-based index
        float *b = mat::create(1);
        mat::_setIndexValue<<<1, 1>>>(b, a, index - 1);
        float *c = mat::createHost(1);
        mat::copyDeviceToHost(c, b, 1);
        float value = c[0];
        free(c);       // was leaked before
        cudaFree(b);   // was leaked before
        return value;
    }
    float amax(float **a, int m, int n){
        return mat::amax(mat::getDataPointer(a), m * n);
    }
    float dot(float *a, float *b, int n){
        float dot_ab = 0;
        cublasSdot_v2(cublas_handle, n, a, 1, b, 1, &dot_ab);
        return dot_ab;
    }
    float dot(float **a, float **b, int m, int n){
        return mat::dot(mat::getDataPointer(a), mat::getDataPointer(b), m * n);
    }
    // ---- binary file I/O under externaltools/ --------------------------
    void read(float *data, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "rb");
        // bug fix: this previously called fwrite(), so read() never
        // actually read anything from the file
        fread(data, sizeof(float), n, file);
        fclose(file);
    }
    void write(float *data, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "wb");
        fwrite(data, sizeof(float), n, file);
        fclose(file);
    }
    void write(float **data, int m, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "wb");
        for(int i = 0; i < m; i++){
            fwrite(data[i], sizeof(float), n, file);
        }
        fclose(file);
    }
    void write(float ***data, int p, int m, int n, char *fname){
        char buffer[50] = "externaltools/";
        strcat(buffer, fname);
        FILE *file = fopen(buffer, "wb");
        for(int k = 0; k < p; k++){
            for(int i = 0; i < m; i++){
                fwrite(data[k][i], sizeof(float), n, file);
            }
        }
        fclose(file);
    }
}
// Short aliases for the most frequently used members of namespace dat.
dim3 &nxb = dat::nxb;
dim3 &nzt = dat::nzt;
int &sh = dat::wave_propagation_sh;
int &psv = dat::wave_propagation_psv;
int &mode = dat::simulation_mode;
int &nx = dat::nx;
int &nz = dat::nz;
int &nt = dat::nt;
int &nsrc = dat::nsrc;
int &nrec = dat::nrec;
float &dx = dat::dx;
float &dz = dat::dz;
float &dt = dat::dt;
// Stress divergence for the SH system: dsy = d(sxy)/dx + d(szy)/dz.
// Fourth-order staggered-grid finite differences; the x term is zeroed
// within two points of the x edges and the z term is skipped near the
// z edges. Launched with <<<nxb, nzt>>> (one thread per grid point).
__global__ void divSY(float **dsy, float **sxy, float **szy, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 2 && i < nx - 2){
        dsy[i][j] = 9*(sxy[i][j]-sxy[i-1][j])/(8*dx)-(sxy[i+1][j]-sxy[i-2][j])/(24*dx);
    }
    else{
        dsy[i][j] = 0;
    }
    if(j >= 2 && j < nz - 2){
        dsy[i][j] += 9*(szy[i][j]-szy[i][j-1])/(8*dz)-(szy[i][j+1]-szy[i][j-2])/(24*dz);
    }
}
// Stress divergence for the P-SV system:
//   dsx = d(sxx)/dx + d(sxz)/dz,  dsz = d(sxz)/dx + d(szz)/dz.
// Same fourth-order stencil and edge handling as divSY.
__global__ void divSXZ(float **dsx, float **dsz, float **sxx, float **szz, float **sxz, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 2 && i < nx - 2){
        dsx[i][j] = 9*(sxx[i][j]-sxx[i-1][j])/(8*dx)-(sxx[i+1][j]-sxx[i-2][j])/(24*dx);
        dsz[i][j] = 9*(sxz[i][j]-sxz[i-1][j])/(8*dx)-(sxz[i+1][j]-sxz[i-2][j])/(24*dx);
    }
    else{
        dsx[i][j] = 0;
        dsz[i][j] = 0;
    }
    if(j >= 2 && j < nz - 2){
        dsx[i][j] += 9*(sxz[i][j]-sxz[i][j-1])/(8*dz)-(sxz[i][j+1]-sxz[i][j-2])/(24*dz);
        dsz[i][j] += 9*(szz[i][j]-szz[i][j-1])/(8*dz)-(szz[i][j+1]-szz[i][j-2])/(24*dz);
    }
}
// Spatial gradients of vy (SH system), fourth-order staggered stencil
// offset forward by half a grid point relative to divSY; derivatives are
// zeroed near the edges where the stencil would leave the grid.
__global__ void divVY(float **dvydx, float **dvydz, float **vy, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 1 && i < nx - 2){
        dvydx[i][j] = 9*(vy[i+1][j]-vy[i][j])/(8*dx)-(vy[i+2][j]-vy[i-1][j])/(24*dx);
    }
    else{
        dvydx[i][j] = 0;
    }
    if(j >= 1 && j < nz - 2){
        dvydz[i][j] = 9*(vy[i][j+1]-vy[i][j])/(8*dz)-(vy[i][j+2]-vy[i][j-1])/(24*dz);
    }
    else{
        dvydz[i][j] = 0;
    }
}
// Spatial gradients of vx and vz (P-SV system); same stencil and edge
// handling as divVY, producing all four velocity-gradient components.
__global__ void divVXZ(float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz, float **vx, float **vz, float dx, float dz, int nx, int nz){
    devij;
    if(i >= 1 && i < nx - 2){
        dvxdx[i][j] = 9*(vx[i+1][j]-vx[i][j])/(8*dx)-(vx[i+2][j]-vx[i-1][j])/(24*dx);
        dvzdx[i][j] = 9*(vz[i+1][j]-vz[i][j])/(8*dx)-(vz[i+2][j]-vz[i-1][j])/(24*dx);
    }
    else{
        dvxdx[i][j] = 0;
        dvzdx[i][j] = 0;
    }
    if(j >= 1 && j < nz - 2){
        dvxdz[i][j] = 9*(vx[i][j+1]-vx[i][j])/(8*dz)-(vx[i][j+2]-vx[i][j-1])/(24*dz);
        dvzdz[i][j] = 9*(vz[i][j+1]-vz[i][j])/(8*dz)-(vz[i][j+2]-vz[i][j-1])/(24*dz);
    }
    else{
        dvxdz[i][j] = 0;
        dvzdz[i][j] = 0;
    }
}
// Inject the source time functions of time step `it` into the stress
// divergence at every source grid point. One thread per source
// (launched with <<<nsrc, 1>>>, or <<<nrec, 1>>> for adjoint sources);
// isrc < 0 injects all sources, otherwise only source isrc.
__global__ void addSTF(float **dsx, float **dsy, float **dsz, float **stf_x, float **stf_y, float **stf_z,
    int *src_x_id, int *src_z_id, int isrc, int sh, int psv, int it){
    int is = blockIdx.x;
    int xs = src_x_id[is];
    int zs = src_z_id[is];
    if(isrc < 0 || isrc == is){
        if(sh){
            dsy[xs][zs] += stf_y[is][it];
        }
        if(psv){
            dsx[xs][zs] += stf_x[is][it];
            dsz[xs][zs] += stf_z[is][it];
        }
    }
}
// Sample the wavefield at each receiver grid point into the per-receiver
// trace buffers at time step `it`. One thread per receiver
// (launched with <<<nrec, 1>>>).
__global__ void saveV(float **v_rec_x, float **v_rec_y, float **v_rec_z, float **vx, float **vy, float **vz,
    int *rec_x_id, int *rec_z_id, int sh, int psv, int it){
    int ir = blockIdx.x;
    int xr = rec_x_id[ir];
    int zr = rec_z_id[ir];
    if(sh){
        v_rec_y[ir][it] = vy[xr][zr];
    }
    if(psv){
        v_rec_x[ir][it] = vx[xr][zr];
        v_rec_z[ir][it] = vz[xr][zr];
    }
}
// Overload of saveV that records into per-source 3-D arrays
// v_rec_*[source][receiver][time] -- used to store the "observed" data
// produced by the true model for each source `isrc`.
__global__ void saveV(float ***v_rec_x, float ***v_rec_y, float ***v_rec_z, float **vx, float **vy, float **vz,
int *rec_x_id, int *rec_z_id, int isrc, int sh, int psv, int it){
int ir = blockIdx.x;
int xr = rec_x_id[ir];
int zr = rec_z_id[ir];
if(sh){
v_rec_y[isrc][ir][it] = vy[xr][zr];
}
if(psv){
v_rec_x[isrc][ir][it] = vx[xr][zr];
v_rec_z[isrc][ir][it] = vz[xr][zr];
}
}
// Velocity time step: v = absbound * (v + dt * ds / rho), where ds holds the
// stress divergence (plus any injected sources) and absbound is the
// absorbing-boundary taper (1 in the interior).
__global__ void updateV(float **v, float **ds, float **rho, float **absbound, float dt){
devij;
v[i][j] = absbound[i][j] * (v[i][j] + dt * ds[i][j] / rho[i][j]);
}
// SH stress time step: sxy and szy advance by dt * mu * (velocity gradient).
__global__ void updateSY(float **sxy, float **szy, float **dvydx, float **dvydz, float **mu, float dt){
devij;
sxy[i][j] += dt * mu[i][j] * dvydx[i][j];
szy[i][j] += dt * mu[i][j] * dvydz[i][j];
}
// P-SV stress time step using the isotropic (lambda, mu) constitutive law:
// sxx/szz get (lambda + 2mu) on the normal strain rate and lambda on the
// transverse one; sxz gets mu times the shear strain rate.
__global__ void updateSXZ(float **sxx, float **szz, float **sxz, float **dvxdx, float **dvxdz, float **dvzdx, float **dvzdz,
float **lambda, float **mu, float dt){
devij;
sxx[i][j] += dt * ((lambda[i][j] + 2 * mu[i][j]) * dvxdx[i][j] + lambda[i][j] * dvzdz[i][j]);
szz[i][j] += dt * ((lambda[i][j] + 2 * mu[i][j]) * dvzdz[i][j] + lambda[i][j] * dvxdx[i][j]);
sxz[i][j] += dt * (mu[i][j] * (dvxdz[i][j] + dvzdx[i][j]));
}
// Integrates displacement from velocity: u += v * dt.
__global__ void updateU(float **u, float **v, float dt){
devij;
u[i][j] += v[i][j] * dt;
}
// SH density-kernel interaction: subtracts the product of the adjoint velocity
// vy and the stored forward velocity vy_fw, weighted by tsfe (the time span
// represented by one stored forward frame).
__global__ void interactionRhoY(float **K_rho, float **vy, float **vy_fw, float tsfe){
devij;
K_rho[i][j] -= vy_fw[i][j] * vy[i][j] * tsfe;
}
// P-SV density-kernel interaction: like interactionRhoY but summing the
// x- and z-component products of adjoint and stored forward velocities.
__global__ void interactionRhoXZ(float **K_rho, float **vx, float **vx_fw, float **vz, float **vz_fw, float tsfe){
devij;
K_rho[i][j] -= (vx_fw[i][j] * vx[i][j] + vz_fw[i][j] * vz[i][j]) * tsfe;
}
// SH shear-modulus kernel interaction: dot product of adjoint and stored
// forward vy gradients, weighted by tsfe.
__global__ void interactionMuY(float **K_mu, float **dvydx, float **dvydx_fw, float **dvydz, float **dvydz_fw, float tsfe){
devij;
K_mu[i][j] -= (dvydx[i][j] * dvydx_fw[i][j] + dvydz[i][j] * dvydz_fw[i][j]) * tsfe;
}
// P-SV shear-modulus kernel interaction: twice the normal-strain products
// plus the cross shear term, between adjoint and stored forward velocity
// gradients, weighted by tsfe.
__global__ void interactionMuXZ(float **K_mu, float **dvxdx, float **dvxdx_fw, float **dvxdz, float **dvxdz_fw,
float **dvzdx, float **dvzdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
devij;
K_mu[i][j] -= (2 * dvxdx[i][j] * dvxdx_fw[i][j] + 2 * dvzdz[i][j] * dvzdz_fw[i][j] +
(dvxdz[i][j] + dvzdx[i][j]) * (dvzdx_fw[i][j] + dvxdz_fw[i][j])) * tsfe;
}
// Lambda kernel interaction: product of the trace of the adjoint and stored
// forward velocity-gradient tensors (dvxdx + dvzdz), weighted by tsfe.
__global__ void interactionLambdaXZ(float **K_lambda, float **dvxdx, float **dvxdx_fw, float **dvzdz, float **dvzdz_fw, float tsfe){
devij;
K_lambda[i][j] -= ((dvxdx[i][j] + dvzdz[i][j]) * (dvxdx_fw[i][j] + dvzdz_fw[i][j])) * tsfe;
}
// Maps a physical coordinate coord_n[i] in [0, Ln] to its nearest grid index
// in [0, n-1] (the + 0.5 rounds to nearest). One block per coordinate;
// note n is the grid dimension passed as float.
__global__ void computeIndices(int *coord_n_id, float *coord_n, float Ln, float n){
int i = blockIdx.x;
coord_n_id[i] = (int)(coord_n[i] / Ln * (n - 1) + 0.5);
}
// Builds the absorbing-boundary taper: 1 in the interior, multiplied by a
// Gaussian falloff exp(-((d - width)/(2*width))^2) inside a strip of `width`
// along each enabled edge (left/right in x, bottom/top in z). Corner points
// accumulate the product of both overlapping tapers.
__global__ void initialiseAbsorbingBoundaries(float **absbound, float width,
int absorb_left, int absorb_right, int absorb_bottom, int absorb_top,
float Lx, float Lz, float dx, float dz){
devij;
absbound[i][j] = 1;
float X = i * dx;
float Z = j * dz;
if(absorb_left){
if(X < width){
absbound[i][j] *= exp(-pow((X - width) / (2 * width), 2));
}
}
if(absorb_right){
if(X > Lx - width){
absbound[i][j] *= exp(-pow((X - (Lx - width)) / (2 * width), 2));
}
}
if(absorb_bottom){
if(Z < width){
absbound[i][j] *= exp(-pow((Z - width) / (2 * width), 2));
}
}
if(absorb_top){
if(Z > Lz - width){
absbound[i][j] *= exp(-pow((Z - (Lz - width)) / (2 * width), 2));
}
}
}
// Builds the adjoint source-time function: twice the tapered synthetic-minus-
// observed waveform difference, stored time-reversed (index nt - it - 1) so
// the adjoint simulation can play it forward.
// Launched as <<<nt, nrec>>>: one block per time step, one thread per receiver.
__global__ void prepareAdjointSTF(float **adstf, float **u_syn, float ***u_obs, float *tw, int nt, int isrc){
int it = blockIdx.x;
int irec = threadIdx.x;
adstf[irec][nt - it - 1] = (u_syn[irec][it] - u_obs[isrc][irec][it]) * tw[it] * 2;
}
// Scales a sensitivity kernel by a reference model value divided by the
// reference misfit (non-dimensionalisation before filtering/updating).
__global__ void normKernel(float **model, float model_ref, float misfit_ref){
devij;
model[i][j] *= model_ref / misfit_ref;
}
// 1-D Gaussian weight with standard deviation `sigma`, evaluated at the
// integer offset `x`: (1 / (sqrt(2*pi) * sigma)) * exp(-x^2 / (2*sigma^2)).
__device__ float gaussian(int x, int sigma){
    float offset = (float)x;
    float dev = (float)sigma;
    float amplitude = 1 / (sqrtf(2 * d_pi) * dev);
    float exponent = -offset * offset / (2 * dev * dev);
    return amplitude * expf(exponent);
}
// Precomputes the separable normalisation factor for the Gaussian filter:
// model[i][j] = (sum of x-weights at i) * (sum of z-weights at j), used later
// by filterKernelZ to renormalise after convolution (handles edge truncation).
__global__ void initialiseGaussian(float **model, int nx, int nz, int sigma){
devij;
float sumx = 0;
for(int n = 0; n < nx; n++){
sumx += gaussian(i - n, sigma);
}
float sumz = 0;
for(int n = 0; n < nz; n++){
sumz += gaussian(j - n, sigma);
}
model[i][j] = sumx * sumz;
}
// First pass of the separable Gaussian smoothing: convolves `model` along x
// and writes the intermediate result into gtemp (model is left untouched).
__global__ void filterKernelX(float **model, float **gtemp, int nx, int sigma){
devij;
float sumx = 0;
for(int n = 0; n < nx; n++){
sumx += gaussian(i - n, sigma) * model[n][j];
}
gtemp[i][j] = sumx;
}
// Second pass of the separable Gaussian smoothing: convolves gtemp along z,
// divides by the precomputed normalisation gsum (from initialiseGaussian) and
// writes the smoothed result back into `model`.
__global__ void filterKernelZ(float **model, float **gtemp, float **gsum, int nz, int sigma){
devij;
float sumz = 0;
for(int n = 0; n < nz; n++){
sumz += gaussian(j - n, sigma) * gtemp[i][n];
}
model[i][j] = sumz / gsum[i][j];
}
// Multiplicative trial model update for the line search: first undoes the
// previous trial step (divide by 1 - step_prev * kernel), then applies the
// new one (multiply by 1 - step * kernel). Passing step_prev = 0 applies a
// fresh step.
__global__ void updateModel(float **model, float **kernel, float step, float step_prev){
devij;
model[i][j] /= (1 - step_prev * kernel[i][j]);
model[i][j] *= (1 - step * kernel[i][j]);
}
// Builds a cosine taper over the trace: ramps from 0 to 1 across the first
// 10% of the record, 1 in the middle, and back down across the last 10%.
// One block per time sample.
__global__ void getTaperWeights(float *tw, float dt, int nt){
int it = blockIdx.x;
float t_end = (nt - 1) * dt;
float taper_width = t_end / 10;
float t_min = taper_width;
float t_max = t_end - taper_width;
float t = it * dt;
if(t <= t_min){
tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_min - t) / (taper_width));
}
else if(t >= t_max){
// cos is even, so the negative argument mirrors the leading ramp
tw[it] = 0.5 + 0.5 * cosf(d_pi * (t_max - t) / (taper_width));
}
else{
tw[it] = 1;
}
}
// Accumulates the squared, tapered waveform difference for one
// (source, receiver) pair into the per-time-step misfit array.
// Launched once per pair with <<<nt, 1>>>; the += accumulates across launches
// (misfit must be zeroed before the first call).
__global__ void calculateMisfit(float *misfit, float **u_syn, float ***u_obs, float *tw, float dt, int isrc, int irec){
int it = blockIdx.x;
float wavedif = (u_syn[irec][it] - u_obs[isrc][irec][it]) * tw[it];
misfit[it] += wavedif * wavedif * dt;
}
// Copies the leading N x N submatrix of two column-major matrices with
// leading dimension M into densely packed N x N outputs (leading dimension N).
// Used by solveQR to extract the R factor and the reduced RHS.
// Launched as <<<N, N>>>: blockIdx.x = row index i, threadIdx.x = column j.
__global__ void reduceSystem(const double * __restrict d_in1, double * __restrict d_out1, const double * __restrict d_in2, double * __restrict d_out2, const int M, const int N) {
const int i = blockIdx.x;
const int j = threadIdx.x;
if ((i < N) && (j < N)){
d_out1[j * N + i] = d_in1[j * M + i];
d_out2[j * N + i] = d_in2[j * M + i];
}
}
// Solves the least-squares problem min ||A x - b|| for the Nrows x Ncols
// column-major matrix h_A and host RHS h_B via QR factorisation
// (cuSOLVER geqrf + ormqr) followed by a cuBLAS triangular solve.
// The Ncols-element solution is copied into XC.
// NOTE(review): the first cusolverDnDormqr call writes into d_Q, whose input
// contents are uninitialised and whose output is never read afterwards --
// this looks like dead code; confirm before removing.
static void solveQR(double *h_A, double *h_B, double *XC, const int Nrows, const int Ncols){
int work_size = 0;
int *devInfo = mat::createInt(1);
double *d_A = mat::createDouble(Nrows * Ncols);
cudaMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), cudaMemcpyHostToDevice);
double *d_TAU = mat::createDouble(min(Nrows, Ncols));
cusolverDnDgeqrf_bufferSize(solver_handle, Nrows, Ncols, d_A, Nrows, &work_size);
double *work = mat::createDouble(work_size);
// in-place QR factorisation: d_A now holds R (upper) and Householder vectors
cusolverDnDgeqrf(solver_handle, Nrows, Ncols, d_A, Nrows, d_TAU, work, work_size, devInfo);
double *d_Q = mat::createDouble(Nrows * Nrows);
cusolverDnDormqr(solver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_N, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_Q, Nrows, work, work_size, devInfo);
double *d_C = mat::createDouble(Nrows * Nrows);
mat::init(d_C, Nrows * Nrows, 0);
cudaMemcpy(d_C, h_B, Nrows * sizeof(double), cudaMemcpyHostToDevice);
// d_C := Q^T * b
cusolverDnDormqr(solver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_T, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_C, Nrows, work, work_size, devInfo);
double *d_R = mat::createDouble(Ncols * Ncols);
double *d_B = mat::createDouble(Ncols * Ncols);
// pack the top Ncols x Ncols of R and of Q^T b into dense matrices
reduceSystem<<<Ncols, Ncols>>>(d_A, d_R, d_C, d_B, Nrows, Ncols);
const double alpha = 1.;
// back-substitution: solve R x = (Q^T b) restricted to the leading rows
cublasDtrsm(cublas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, Ncols, Ncols,
&alpha, d_R, Ncols, d_B, Ncols);
cudaMemcpy(XC, d_B, Ncols * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_Q);
cudaFree(d_R);
cudaFree(d_TAU);
cudaFree(devInfo);
cudaFree(work);
}
// Least-squares quadratic fit y ~= p[0]*x^2 + p[1]*x + p[2] over n points,
// solved on the GPU via solveQR. Returns the residual sum of squares.
static double polyfit(double *x, double *y, double *p, int n){
    // Vandermonde matrix, column-major (columns: x^2, x, 1) as solveQR expects
    double *A = mat::createDoubleHost(3 * n);
    for(int i = 0; i < n; i++){
        A[i] = x[i] * x[i];
        A[i + n] = x[i];
        A[i + n * 2] = 1;
    }
    solveQR(A, y, p, n, 3);
    free(A); // fix: A was previously leaked
    // residual sum of squares of the fitted parabola
    double rss = 0;
    for(int i = 0; i < n; i++){
        double ei = p[0] * x[i] * x[i] + p[1] * x[i] + p[2];
        rss += pow(y[i] - ei, 2);
    }
    return rss;
}
// Reads the JSON configuration file externaltools/config into the global
// dat:: parameter set: grid/time dimensions, boundary flags, wave propagation
// type, and the source/receiver definitions (coordinates uploaded to device).
static void importData(){
    FILE *datfile = fopen("externaltools/config","r");
    if (datfile == NULL){
        // fix: previously a missing config file caused fseek(NULL, ...) to crash
        printf("could not open externaltools/config\n");
        return;
    }
    char *buffer = 0;
    long length;
    fseek (datfile, 0, SEEK_END);
    length = ftell (datfile);
    fseek (datfile, 0, SEEK_SET);
    buffer = (char *)malloc (length + 1);
    if (buffer){
        // fix: fread may return fewer bytes than requested; terminate at the
        // number actually read instead of assuming a full read
        size_t nread = fread (buffer, 1, length, datfile);
        buffer[nread] = '\0';
    }
    fclose(datfile);
    if (buffer){
        // ArduinoJson parses the buffer in place and keeps pointers into it,
        // so the buffer must stay alive until parsing results are consumed.
        DynamicJsonBuffer jsonBuffer;
        JsonObject& root = jsonBuffer.parseObject(buffer);
        if (!root.success()){
            printf("parseObject() failed\n");
        }
        else{
            dat::nx = root["nx"];
            dat::nz = root["nz"];
            dat::nt = root["nt"];
            dat::dt = root["dt"];
            dat::Lx = root["Lx"];
            dat::Lz = root["Lz"];
            dat::sfe = root["sfe"];
            dat::model_type = root["model_type"];
            dat::use_given_model = root["use_given_model"];
            dat::use_given_stf = root["use_given_stf"];
            dat::source_amplitude = root["source_amplitude"];
            dat::order = root["order"]; // order = 2: later
            dat::obs_type = root["obs_type"];
            dat::absorb_left = root["absorb_left"];
            dat::absorb_right = root["absorb_right"];
            dat::absorb_top = root["absorb_top"];
            dat::absorb_bottom = root["absorb_bottom"];
            dat::absorb_width = root["width"];
            {
                // wave propagation type -> SH / P-SV enable flags
                const char* wave_propagation_type = root["wave_propagation_type"].as<char*>();
                if(strcmp(wave_propagation_type,"SH") == 0){
                    dat::wave_propagation_sh = 1;
                    dat::wave_propagation_psv = 0;
                }
                else if(strcmp(wave_propagation_type,"PSV") == 0){
                    dat::wave_propagation_sh = 0;
                    dat::wave_propagation_psv = 1;
                }
                else if(strcmp(wave_propagation_type,"both") == 0){
                    dat::wave_propagation_sh = 1;
                    dat::wave_propagation_psv = 1;
                }
                else{
                    dat::wave_propagation_sh = 0;
                    dat::wave_propagation_psv = 0;
                }
            }
            {
                // sources: src_info is either a single object or an array
                int single_src = root["src_info"].is<JsonObject>();
                dat::nsrc = single_src?1:root["src_info"].size();
                float *src_x = mat::createHost(nsrc);
                float *src_z = mat::createHost(nsrc);
                dat::stf_type = mat::createIntHost(nsrc);
                dat::stf_PSV_x = mat::createHost(nsrc);
                dat::stf_PSV_z = mat::createHost(nsrc);
                dat::tauw_0 = mat::createHost(nsrc);
                dat::tauw = mat::createHost(nsrc);
                dat::tee_0 = mat::createHost(nsrc);
                dat::f_min = mat::createHost(nsrc);
                dat::f_max = mat::createHost(nsrc);
                for(int isrc = 0; isrc < nsrc; isrc++){
                    JsonObject& src = single_src?root["src_info"]:((JsonArray&)root["src_info"]).get<JsonObject>(isrc);
                    src_x[isrc] = src["loc_x"];
                    src_z[isrc] = src["loc_z"];
                    dat::stf_PSV_x[isrc] = src["stf_PSV"][0];
                    dat::stf_PSV_z[isrc] = src["stf_PSV"][1];
                    dat::tauw_0[isrc] = src["tauw_0"];
                    dat::tauw[isrc] = src["tauw"];
                    dat::tee_0[isrc] = src["tee_0"];
                    dat::f_min[isrc] = src["f_min"];
                    dat::f_max[isrc] = src["f_max"];
                    // map the textual stf_type onto the integer codes used by
                    // makeSourceTimeFunction (-1 = unknown)
                    const char* stf_type_str = src["stf_type"].as<char*>();
                    if(strcmp(stf_type_str,"delta") == 0){
                        dat::stf_type[isrc] = 0;
                    }
                    else if(strcmp(stf_type_str,"delta_bp") == 0){
                        dat::stf_type[isrc] = 1;
                    }
                    else if(strcmp(stf_type_str,"ricker") == 0){
                        dat::stf_type[isrc] = 2;
                    }
                    else if(strcmp(stf_type_str,"heaviside_bp") == 0){
                        dat::stf_type[isrc] = 3;
                    }
                    else{
                        dat::stf_type[isrc] = -1;
                    }
                }
                dat::src_x = mat::create(nsrc);
                dat::src_z = mat::create(nsrc);
                mat::copyHostToDevice(dat::src_x, src_x, nsrc);
                mat::copyHostToDevice(dat::src_z, src_z, nsrc);
                free(src_x);
                free(src_z);
            }
            {
                // receivers: rec_x / rec_z are either scalars or arrays
                int single_rec = root["rec_x"].is<float>();
                dat::nrec = single_rec?1:root["rec_x"].size();
                float *rec_x = mat::createHost(nrec);
                float *rec_z = mat::createHost(nrec);
                for(int irec = 0; irec < nrec; irec++){
                    rec_x[irec] = single_rec?root["rec_x"]:((JsonArray&)root["rec_x"]).get<float>(irec);
                    rec_z[irec] = single_rec?root["rec_z"]:((JsonArray&)root["rec_z"]).get<float>(irec);
                }
                dat::rec_x = mat::create(nrec);
                dat::rec_z = mat::create(nrec);
                mat::copyHostToDevice(dat::rec_x, rec_x, nrec);
                mat::copyHostToDevice(dat::rec_z, rec_z, nrec);
                free(rec_x);
                free(rec_z);
            }
        }
        jsonBuffer.clear();
        free(buffer); // fix: file buffer was previously leaked
    }
}
// Prints the GPU memory consumption as "used / total" in megabytes.
static void checkMemoryUsage(){
    size_t bytes_free;
    size_t bytes_total;
    cudaMemGetInfo( &bytes_free, &bytes_total ) ;
    float free_f = (float)bytes_free;
    float total_f = (float)bytes_total;
    float used_f = total_f - free_f;
    printf("memory usage: %.1fMB / %.1fMB\n", used_f / 1024.0 / 1024.0, total_f / 1024.0 / 1024.0);
}
// Fills stf[0..nt-1] with the source-time function for source `index` and
// normalises its peak absolute amplitude to 1.
// Only stf_type == 2 (the "ricker" entry from the config; a Gaussian-derivative
// wavelet with width parameters tauw_0/tauw and delay tee_0) is implemented;
// other types leave stf untouched.
static void makeSourceTimeFunction(float *stf, int index){
float max = 0;
float alfa = 2 * dat::tauw_0[index] / dat::tauw[index];
for(int it = 0; it < nt; it++){
float t = it * dt;
switch(dat::stf_type[index]){
case 2:{
stf[it] = (-2 * pow(alfa, 3) / pi) * (t - dat::tee_0[index]) * exp(-pow(alfa, 2) * pow(t - dat::tee_0[index], 2));
break;
}
// other stf: later
}
if(fabs(stf[it]) > max){
max = fabs(stf[it]);
}
}
if(max > 0){
for(int it = 0; it < nt; it++){
stf[it] /= max;
}
}
}
// Builds the source-time functions for every source and uploads them to the
// device. stf_y carries the full amplitude (SH component); stf_x/stf_z split
// the amplitude along the normalised P-SV polarisation vector
// (stf_PSV_x, stf_PSV_z). Amplitude is scaled by the grid cell area.
static void prepareSTF(){
    float amp = dat::source_amplitude / dx / dz;
    float **stf_x = mat::createHost(nsrc, nt);
    float **stf_y = mat::createHost(nsrc, nt);
    float **stf_z = mat::createHost(nsrc, nt);
    float *stfn = mat::createHost(nt);
    for(int isrc = 0; isrc < nsrc; isrc++){
        makeSourceTimeFunction(stfn, isrc);
        float px = dat::stf_PSV_x[isrc];
        float pz = dat::stf_PSV_z[isrc];
        float norm = sqrt(pow(px,2) + pow(pz,2));
        for(int it = 0; it < nt; it++){
            stf_y[isrc][it] = amp * stfn[it];
            if(norm > 0){
                stf_x[isrc][it] = amp * stfn[it] * px / norm;
                stf_z[isrc][it] = amp * stfn[it] * pz / norm;
            }
            else{
                // fix: a zero polarisation vector previously produced
                // 0/0 = NaN in the P-SV components
                stf_x[isrc][it] = 0;
                stf_z[isrc][it] = 0;
            }
        }
    }
    mat::copyHostToDevice(dat::stf_x, stf_x, nsrc, nt);
    mat::copyHostToDevice(dat::stf_y, stf_y, nsrc, nt);
    mat::copyHostToDevice(dat::stf_z, stf_z, nsrc, nt);
    free(*stf_x);
    free(*stf_y);
    free(*stf_z);
    free(stf_x);
    free(stf_y);
    free(stf_z);
    free(stfn);
}
// Fills the device model fields rho / mu / lambda according to
// dat::model_type:
//   1  -- homogeneous (rho 3000, mu = lambda = 4.8e10)
//   10 -- homogeneous (rho 2600, mu 2.66e10, lambda 3.42e10)
//   13 -- same background as 10, plus a denser (rho 2800) square block
//         spanning the central tenth of the grid in each direction
static void defineMaterialParameters(){
    // other model_type: later
    switch(dat::model_type){
        case 1:{
            mat::init(dat::rho, nx, nz, 3000);
            mat::init(dat::mu, nx, nz, 4.8e10);
            mat::init(dat::lambda, nx, nz, 4.8e10);
            break;
        }
        case 10:{
            mat::init(dat::rho, nx, nz, 2600);
            mat::init(dat::mu, nx, nz, 2.66e10);
            mat::init(dat::lambda, nx, nz, 3.42e10);
            break;
        }
        case 13:{
            mat::init(dat::mu, nx, nz, 2.66e10);
            mat::init(dat::lambda, nx, nz, 3.42e10);
            // density anomaly bounds: central 1/10th of the grid
            int left = (int)((float)nx / 2 - (float)nx / 20 + 0.5);
            int right = (int)((float)nx / 2 + (float)nx / 20 + 0.5);
            int bottom = (int)((float)nz / 2 - (float)nz / 20 + 0.5);
            int top = (int)((float)nz / 2 + (float)nz / 20 + 0.5);
            float **rho2 = mat::createHost(nx, nz);
            mat::initHost(rho2, nx, nz, 2600);
            for(int i = left; i < right; i++){
                for(int j = bottom; j < top; j++){
                    rho2[i][j] = 2800;
                }
            }
            mat::copyHostToDevice(dat::rho, rho2, nx, nz);
            free(*rho2);
            free(rho2);
            break; // fix: removed unused vp/vs/rho/mu/lambda locals; added break
        }
    }
}
// Zeroes the time-dependent wavefields (velocity, displacement, stress) on
// the device for whichever wave types (SH, P-SV) are enabled.
static void initialiseDynamicFields(){
if(sh){
mat::init(dat::vy, nx, nz, 0);
mat::init(dat::uy, nx, nz, 0);
mat::init(dat::sxy, nx, nz, 0);
mat::init(dat::szy, nx, nz, 0);
}
if(psv){
mat::init(dat::vx, nx, nz, 0);
mat::init(dat::vz, nx, nz, 0);
mat::init(dat::ux, nx, nz, 0);
mat::init(dat::uz, nx, nz, 0);
mat::init(dat::sxx, nx, nz, 0);
mat::init(dat::szz, nx, nz, 0);
mat::init(dat::sxz, nx, nz, 0);
}
}
// Zeroes the sensitivity-kernel accumulators before an adjoint run.
static void initialiseKernels(){
mat::init(dat::K_lambda, nx, nz, 0);
mat::init(dat::K_mu, nx, nz, 0);
mat::init(dat::K_rho, nx, nz, 0);
}
// Core time loop for both the forward (mode == 0) and adjoint (mode == 1)
// simulations. In forward mode it injects the source STF, records receiver
// traces and stores displacement/velocity snapshots every sfe steps (in
// reverse order, so the adjoint pass can consume them front-to-back). In
// adjoint mode it injects the adjoint STF at the receivers and, at each
// snapshot time, reloads the stored forward fields and accumulates the
// rho/mu/lambda interaction kernels.
// mode: presumably a macro aliasing dat::simulation_mode (set by
// runForward / runAdjoint) -- defined elsewhere; TODO confirm.
static void runWaveFieldPropagation(){
initialiseDynamicFields();
for(int it = 0; it < nt; it++){
if(mode == 0){
// store displacement snapshots in reverse time order
if((it + 1) % dat::sfe == 0){
int isfe = dat::nsfe - (it + 1) / dat::sfe;
if(sh){
mat::copyDeviceToHost(dat::uy_forward[isfe], dat::uy, nx, nz);
}
if(psv){
mat::copyDeviceToHost(dat::ux_forward[isfe], dat::ux, nx, nz);
mat::copyDeviceToHost(dat::uz_forward[isfe], dat::uz, nx, nz);
}
}
}
// stress divergence -> ds*
if(sh){
divSY<<<nxb, nzt>>>(dat::dsy, dat::sxy, dat::szy, dx, dz, nx, nz);
}
if(psv){
divSXZ<<<nxb, nzt>>>(dat::dsx, dat::dsz, dat::sxx, dat::szz, dat::sxz, dx, dz, nx, nz);
}
// source injection: forward uses the physical STF at the sources,
// adjoint uses the adjoint STF at the receivers
if(mode == 0){
addSTF<<<nsrc, 1>>>(
dat::dsx, dat::dsy, dat::dsz, dat::stf_x, dat::stf_y, dat::stf_z,
dat::src_x_id, dat::src_z_id, dat::isrc, sh, psv, it
);
}
else if(mode == 1){
addSTF<<<nrec, 1>>>(
dat::dsx, dat::dsy, dat::dsz, dat::adstf_x, dat::adstf_y, dat::adstf_z,
dat::rec_x_id, dat::rec_z_id, -1, sh, psv, it
);
}
// velocity update, velocity gradients, stress update, displacement
if(sh){
updateV<<<nxb, nzt>>>(dat::vy, dat::dsy, dat::rho, dat::absbound, dt);
divVY<<<nxb, nzt>>>(dat::dvydx, dat::dvydz, dat::vy, dx, dz, nx, nz);
updateSY<<<nxb, nzt>>>(dat::sxy, dat::szy, dat::dvydx, dat::dvydz, dat::mu, dt);
updateU<<<nxb, nzt>>>(dat::uy, dat::vy, dt);
}
if(psv){
updateV<<<nxb, nzt>>>(dat::vx, dat::dsx, dat::rho, dat::absbound, dt);
updateV<<<nxb, nzt>>>(dat::vz, dat::dsz, dat::rho, dat::absbound, dt);
divVXZ<<<nxb, nzt>>>(dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::vx, dat::vz, dx, dz, nx, nz);
updateSXZ<<<nxb, nzt>>>(dat::sxx, dat::szz, dat::sxz, dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz, dat::lambda, dat::mu, dt);
updateU<<<nxb, nzt>>>(dat::ux, dat::vx, dt);
updateU<<<nxb, nzt>>>(dat::uz, dat::vz, dt);
}
if(mode == 0){
// record receiver traces: velocity (obs_type 0), displacement
// (obs_type 1), or per-source observed displacement (obs_type 2)
if(dat::obs_type == 0){
saveV<<<nrec, 1>>>(
dat::v_rec_x, dat::v_rec_y, dat::v_rec_z, dat::vx, dat::vy, dat::vz,
dat::rec_x_id, dat::rec_z_id, sh, psv, it
);
}
else if(dat::obs_type == 1){
saveV<<<nrec, 1>>>(
dat::v_rec_x, dat::v_rec_y, dat::v_rec_z, dat::ux, dat::uy, dat::uz,
dat::rec_x_id, dat::rec_z_id, sh, psv, it
);
}
else if(dat::obs_type == 2 && dat::isrc >= 0){
saveV<<<nrec, 1>>>(
dat::u_obs_x, dat::u_obs_y, dat::u_obs_z, dat::ux, dat::uy, dat::uz,
dat::rec_x_id, dat::rec_z_id, dat::isrc, sh, psv, it
);
}
// velocity snapshots, also in reverse time order
if((it + 1) % dat::sfe == 0){
int isfe = dat::nsfe - (it + 1) / dat::sfe;
if(sh){
mat::copyDeviceToHost(dat::vy_forward[isfe], dat::vy, nx, nz);
}
if(psv){
mat::copyDeviceToHost(dat::vx_forward[isfe], dat::vx, nx, nz);
mat::copyDeviceToHost(dat::vz_forward[isfe], dat::vz, nx, nz);
}
}
}
else if(mode == 1){
// at each snapshot time, correlate the adjoint fields with the
// stored forward fields (ds* reused as staging buffers)
if((it + dat::sfe) % dat::sfe == 0){
// dsi -> ui_fw -> vi_fw
int isfe = (it + dat::sfe) / dat::sfe - 1;
float tsfe = dat::sfe * dt;
if(sh){
mat::copyHostToDevice(dat::dsy, dat::uy_forward[isfe], nx, nz);
divVY<<<nxb, nzt>>>(dat::dvydx, dat::dvydz, dat::uy, dx, dz, nx, nz);
divVY<<<nxb, nzt>>>(dat::dvydx_fw, dat::dvydz_fw, dat::dsy, dx, dz, nx, nz);
mat::copyHostToDevice(dat::dsy, dat::vy_forward[isfe], nx, nz);
interactionRhoY<<<nxb, nzt>>>(dat::K_rho, dat::vy, dat::dsy, tsfe);
interactionMuY<<<nxb, nzt>>>(dat::K_mu, dat::dvydx, dat::dvydx_fw, dat::dvydz, dat::dvydz_fw, tsfe);
}
if(psv){
mat::copyHostToDevice(dat::dsx, dat::ux_forward[isfe], nx, nz);
mat::copyHostToDevice(dat::dsz, dat::uz_forward[isfe], nx, nz);
divVXZ<<<nxb, nzt>>>(
dat::dvxdx, dat::dvxdz, dat::dvzdx, dat::dvzdz,
dat::ux, dat::uz, dx, dz, nx, nz
);
divVXZ<<<nxb, nzt>>>(
dat::dvxdx_fw, dat::dvxdz_fw, dat::dvzdx_fw, dat::dvzdz_fw,
dat::dsx, dat::dsz, dx, dz, nx, nz
);
mat::copyHostToDevice(dat::dsx, dat::vx_forward[isfe], nx, nz);
mat::copyHostToDevice(dat::dsz, dat::vz_forward[isfe], nx, nz);
interactionRhoXZ<<<nxb, nzt>>>(dat::K_rho, dat::vx, dat::dsx, dat::vz, dat::dsz, tsfe);
interactionMuXZ<<<nxb, nzt>>>(
dat::K_mu, dat::dvxdx, dat::dvxdx_fw, dat::dvxdz, dat::dvxdz_fw,
dat::dvzdx, dat::dvzdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe
);
interactionLambdaXZ<<<nxb, nzt>>>(dat::K_lambda, dat::dvxdx, dat::dvxdx_fw, dat::dvzdz, dat::dvzdz_fw, tsfe);
}
}
}
}
}
// Derives launch configuration and grid spacing from the imported parameters
// and allocates every device/host array a simulation run needs. When
// adjoint != 0, additionally allocates kernel accumulators, adjoint-source
// storage and the forward-gradient staging buffers. Also computes the
// source/receiver grid indices, the absorbing-boundary taper, and exports
// the time axis to file "t".
static void checkArgs(int adjoint){
    dat::nxb = dim3(nx, nbt);
    dat::nzt = dim3(nz / nbt);
    // round nt up/down to a multiple of the snapshot interval sfe
    if(nt % dat::sfe != 0){
        nt = dat::sfe * (int)((float)nt / dat::sfe + 0.5);
    }
    dat::nsfe = nt / dat::sfe;
    dat::dx = dat::Lx / (nx - 1);
    dat::dz = dat::Lz / (nz - 1);
    if(sh){
        dat::vy = mat::create(nx, nz);
        dat::uy = mat::create(nx, nz);
        dat::sxy = mat::create(nx, nz);
        dat::szy = mat::create(nx, nz);
        dat::dsy = mat::create(nx, nz);
        dat::dvydx = mat::create(nx, nz);
        dat::dvydz = mat::create(nx, nz);
        dat::v_rec_y = mat::create(nrec, nt);
        // host-side snapshot stacks (one frame per sfe steps)
        dat::uy_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::vy_forward = mat::createHost(dat::nsfe, nx, nz);
    }
    if(psv){
        dat::vx = mat::create(nx, nz);
        dat::vz = mat::create(nx, nz);
        dat::ux = mat::create(nx, nz);
        dat::uz = mat::create(nx, nz);
        dat::sxx = mat::create(nx, nz);
        dat::szz = mat::create(nx, nz);
        dat::sxz = mat::create(nx, nz);
        dat::dsx = mat::create(nx, nz);
        dat::dsz = mat::create(nx, nz);
        dat::dvxdx = mat::create(nx, nz);
        dat::dvxdz = mat::create(nx, nz);
        dat::dvzdx = mat::create(nx, nz);
        dat::dvzdz = mat::create(nx, nz);
        dat::v_rec_x = mat::create(nrec, nt);
        dat::v_rec_z = mat::create(nrec, nt);
        dat::ux_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::uz_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::vx_forward = mat::createHost(dat::nsfe, nx, nz);
        dat::vz_forward = mat::createHost(dat::nsfe, nx, nz);
    }
    dat::absbound = mat::create(nx, nz);
    dat::lambda = mat::create(nx, nz);
    dat::rho = mat::create(nx, nz);
    dat::mu = mat::create(nx, nz);
    dat::stf_x = mat::create(nsrc, nt);
    dat::stf_y = mat::create(nsrc, nt);
    dat::stf_z = mat::create(nsrc, nt);
    if(adjoint){
        if(sh){
            dat::dvydx_fw = mat::create(nx, nz);
            dat::dvydz_fw = mat::create(nx, nz);
        }
        if(psv){
            dat::dvxdx_fw = mat::create(nx, nz);
            dat::dvxdz_fw = mat::create(nx, nz);
            dat::dvzdx_fw = mat::create(nx, nz);
            dat::dvzdz_fw = mat::create(nx, nz);
        }
        dat::K_lambda = mat::create(nx, nz);
        dat::K_mu = mat::create(nx, nz);
        dat::K_rho = mat::create(nx, nz);
        dat::adstf_x = mat::create(nrec, nt);
        dat::adstf_y = mat::create(nrec, nt);
        dat::adstf_z = mat::create(nrec, nt);
    }
    // physical coordinates -> nearest grid indices for sources and receivers
    dat::src_x_id = mat::createInt(nsrc);
    dat::src_z_id = mat::createInt(nsrc);
    dat::rec_x_id = mat::createInt(nrec);
    dat::rec_z_id = mat::createInt(nrec);
    computeIndices<<<nsrc, 1>>>(dat::src_x_id, dat::src_x, dat::Lx, nx);
    computeIndices<<<nsrc, 1>>>(dat::src_z_id, dat::src_z, dat::Lz, nz);
    computeIndices<<<nrec, 1>>>(dat::rec_x_id, dat::rec_x, dat::Lx, nx);
    computeIndices<<<nrec, 1>>>(dat::rec_z_id, dat::rec_z, dat::Lz, nz);
    initialiseAbsorbingBoundaries<<<nxb, nzt>>>(
        dat::absbound, dat::absorb_width,
        dat::absorb_left, dat::absorb_right, dat::absorb_bottom, dat::absorb_top,
        dat::Lx, dat::Lz, dx, dz
    );
    // export the time axis for post-processing
    float *t = mat::createHost(nt);
    for(int it = 0; it < nt; it++){
        t[it] = it * dt;
    }
    mat::write(t, nt, "t");
    free(t); // fix: host time-axis buffer was previously leaked
}
// Runs one forward simulation. isrc selects a single active source;
// isrc < 0 injects all sources at once.
static void runForward(int isrc){
dat::simulation_mode = 0;
dat::isrc = isrc;
runWaveFieldPropagation();
// float **v_rec_x=mat::createHost(dat::nrec, dat::nt);
// float **v_rec_z=mat::createHost(dat::nrec, dat::nt);
// mat::copyDeviceToHost(v_rec_x, dat::v_rec_x, dat::nrec, dat::nt);
// mat::copyDeviceToHost(v_rec_z, dat::v_rec_z, dat::nrec, dat::nt);
// mat::write(v_rec_x, dat::nrec, dat::nt, "vx_rec");
// mat::write(v_rec_z, dat::nrec, dat::nt, "vz_rec");
// mat::write(dat::vx_forward, dat::nsfe, dat::nx, dat::nz, "vx");
// mat::write(dat::vz_forward, dat::nsfe, dat::nx, dat::nz, "vz");
}
// Runs one adjoint simulation, accumulating into the K_* kernels.
// init_kernel != 0 zeroes the kernels first; pass 0 to sum over sources.
static void runAdjoint(int init_kernel){
dat::simulation_mode = 1;
if(init_kernel){
initialiseKernels();
}
runWaveFieldPropagation();
// float **rho = mat::createHost(dat::nx, dat::nz);
// float **mu = mat::createHost(dat::nx, dat::nz);
// float **lambda = mat::createHost(dat::nx, dat::nz);
// mat::copyDeviceToHost(rho, dat::K_rho, dat::nx, dat::nz);
// mat::copyDeviceToHost(mu, dat::K_mu, dat::nx, dat::nz);
// mat::copyDeviceToHost(lambda, dat::K_lambda, dat::nx, dat::nz);
// mat::write(rho, dat::nx, dat::nz, "rho");
// mat::write(mu, dat::nx, dat::nz, "mu");
// mat::write(lambda, dat::nx, dat::nz, "lambda");
// mat::write(dat::vx_forward, dat::nsfe, dat::nx, dat::nz, "vx");
// mat::write(dat::vz_forward, dat::nsfe, dat::nx, dat::nz, "vz");
}
// Runs forward simulations for all sources, accumulates the data misfit
// (tapered L2 difference of the x and z traces only -- the y/SH component is
// not measured here), and, when kernel != 0, also runs the adjoint
// simulations, then normalises and Gaussian-filters the resulting kernels.
// Returns the misfit divided by dat::misfit_ref; the first kernel-producing
// call establishes misfit_ref.
static float computeKernels(int kernel){
float *d_misfit = mat::create(nt);
float *h_misfit = mat::createHost(nt);
mat::init(d_misfit, nt, 0);
if(kernel){
initialiseKernels();
}
for(int isrc = 0; isrc < nsrc; isrc++){
runForward(isrc);
// accumulate misfit per time step across all receivers
for(int irec = 0; irec < nrec; irec++){
calculateMisfit<<<nt, 1>>>(d_misfit, dat::v_rec_x, dat::u_obs_x, dat::tw, dt, isrc, irec);
calculateMisfit<<<nt, 1>>>(d_misfit, dat::v_rec_z, dat::u_obs_z, dat::tw, dt, isrc, irec);
}
if(kernel){
// adjoint sources from the data residuals; y-component zeroed
prepareAdjointSTF<<<nt, nrec>>>(dat::adstf_x, dat::v_rec_x, dat::u_obs_x, dat::tw, nt, isrc);
prepareAdjointSTF<<<nt, nrec>>>(dat::adstf_z, dat::v_rec_z, dat::u_obs_z, dat::tw, nt, isrc);
mat::init(dat::adstf_y, nrec, nt, 0);
runAdjoint(0);
}
}
mat::copyDeviceToHost(h_misfit, d_misfit, nt);
float misfit = 0;
for(int i = 0; i< nt; i++){
misfit += h_misfit[i];
}
free(h_misfit);
cudaFree(d_misfit);
if(kernel){
if(dat::misfit_ref < 0){
dat::misfit_ref = misfit;
}
// non-dimensionalise, then smooth each kernel (separable Gaussian)
normKernel<<<nxb, nzt>>>(dat::K_rho, dat::rho_ref, dat::misfit_ref);
normKernel<<<nxb, nzt>>>(dat::K_mu, dat::mu_ref, dat::misfit_ref);
normKernel<<<nxb, nzt>>>(dat::K_lambda, dat::lambda_ref, dat::misfit_ref);
filterKernelX<<<nxb, nzt>>>(dat::K_rho, dat::gtemp, nx, dat::sigma);
filterKernelZ<<<nxb, nzt>>>(dat::K_rho, dat::gtemp, dat::gsum, nz, dat::sigma);
filterKernelX<<<nxb, nzt>>>(dat::K_mu, dat::gtemp, nx, dat::sigma);
filterKernelZ<<<nxb, nzt>>>(dat::K_mu, dat::gtemp, dat::gsum, nz, dat::sigma);
filterKernelX<<<nxb, nzt>>>(dat::K_lambda, dat::gtemp, nx, dat::sigma);
filterKernelZ<<<nxb, nzt>>>(dat::K_lambda, dat::gtemp, dat::gsum, nz, dat::sigma);
}
return misfit / dat::misfit_ref;
}
// Returns the largest absolute value in a[0..n-1] (as float).
// Returns 0 for an empty array -- previously n == 0 read a[0] out of bounds.
static float findMaxAbs(double *a, int n){
    if(n <= 0){
        return 0;
    }
    double max = fabs(a[0]);
    for(int i = 1; i < n; i++){
        if(fabs(a[i]) > max){
            max = fabs(a[i]);
        }
    }
    return max;
}
// Applies a trial model update with step length `step`, first reverting the
// previous trial of length `step_prev` (see updateModel). Currently only the
// density model is updated; the lambda/mu updates are disabled.
// Returns `step` so callers can chain it as the next step_prev.
static float updateModels(float step, float step_prev){
// updateModel<<<nxb, nzt>>>(dat::lambda, dat::K_lambda, step, step_prev);
// updateModel<<<nxb, nzt>>>(dat::mu, dat::K_mu, step, step_prev);
updateModel<<<nxb, nzt>>>(dat::rho, dat::K_rho, step, step_prev);
return step;
}
// Parabolic line search: evaluates the misfit at 3 (later iterations) or 5
// (first iteration) trial step lengths up to 2 * teststep, fits a quadratic
// (polyfit) and takes its minimiser. If the fit is non-convex (p[0] < 0),
// the minimiser is negative, or the fit quality is poor
// (rss / misfit-range > 0.1), the bracket is expanded or shrunk and the fit
// retried, up to 5 extra evaluations. Leaves the model updated with the
// chosen step and returns it.
static float calculateStepLength(float teststep, float misfit, int iter){
int nsteps = iter?3:5;
double *stepInArray = mat::createDoubleHost(nsteps);
double *misfitArray = mat::createDoubleHost(nsteps);
double *p = mat::createDoubleHost(3);
// evenly spaced trial steps from 0 to 2 * teststep
for(int i = 0; i < nsteps; i++){
stepInArray[i] = 2 * i * teststep / (nsteps - 1);
}
misfitArray[0] = misfit;
double minmisfit = misfit;
double maxmisfit = misfit;
int n_prev = nsteps;
double *stepInArray_prev = NULL;
double *misfitArray_prev = NULL;
double *stepInArray_new = NULL;
double *misfitArray_new = NULL;
double step_prev = stepInArray[0];
// evaluate the misfit at each trial step; updateModels reverts the
// previous trial before applying the next one
for(int i = 1; i < nsteps; i++){
step_prev = updateModels(stepInArray[i], step_prev);
misfitArray[i] = computeKernels(0);
if(misfitArray[i] < minmisfit){
minmisfit = misfitArray[i];
}
if(misfitArray[i] > maxmisfit){
maxmisfit = misfitArray[i];
}
}
double rss = polyfit(stepInArray, misfitArray, p, nsteps);
double step = -p[1] / (2 * p[0]);
double fitGoodness = rss / (maxmisfit - minmisfit);
double minval=p[0]*step*step+p[1]*step+p[2];
printf("p = [%f, %f, %f]\n",p[0],p[1],p[2]);
printf("s = [%f, %f, %f, %f, %f]\n",stepInArray[0],stepInArray[1],stepInArray[2],nsteps==3?0:stepInArray[3],nsteps==3?0:stepInArray[4]);
printf("m = [%f, %f, %f, %f, %f]\n",misfitArray[0],misfitArray[1],misfitArray[2],nsteps==3?0:misfitArray[3],nsteps==3?0:misfitArray[4]);
printf("step=%e rss=%e fg=%e misfit=%f minval=%f\n",step,rss, fitGoodness,misfit, minval);
int nextra = 0;
int idxEmpty;
// retry loop: refit with an adjusted 3-point bracket
while((p[0] < 0 || step < 0 || fitGoodness > 0.1) && nextra < 5){
if(nextra == 0){
// keep a copy of the last accepted bracket
stepInArray_prev = mat::createDoubleHost(nsteps);
misfitArray_prev = mat::createDoubleHost(nsteps);
for(int i = 0; i < nsteps; i++){
stepInArray_prev[i] = stepInArray[i];
misfitArray_prev[i] = misfitArray[i];
}
}
stepInArray_new = mat::createDoubleHost(3);
misfitArray_new = mat::createDoubleHost(3);
stepInArray_new[0] = 0;
misfitArray_new[0] = misfitArray_prev[0];
if(p[0] < 0 && step < 0){
// misfit still decreasing: expand the bracket
stepInArray_new[1] = stepInArray_prev[n_prev - 1];
stepInArray_new[2] = 2 * findMaxAbs(stepInArray_prev, n_prev);
misfitArray_new[1] = misfitArray_prev[n_prev - 1];
idxEmpty = 2;
}
else{
// poor fit: shrink the bracket
stepInArray_new[1] = stepInArray_prev[1] / 3;
stepInArray_new[2] = stepInArray_prev[1];
misfitArray_new[2] = misfitArray_prev[1];
idxEmpty = 1;
}
// only the one missing point needs a fresh simulation
step_prev = updateModels(stepInArray_new[idxEmpty], step_prev);
misfitArray_new[idxEmpty] = computeKernels(0);
rss = polyfit(stepInArray_new, misfitArray_new, p, 3);
step = -p[1] / (2 * p[0]);
fitGoodness = rss / (maxmisfit - minmisfit);
double minval=p[0]*step*step+p[1]*step+p[2];
printf("\np = [%f, %f, %f]\n",p[0],p[1],p[2]);
printf("s = [%f, %f, %f]\n",stepInArray_new[0],stepInArray_new[1],stepInArray_new[2]);
printf("m = [%f, %f, %f]\n",misfitArray_new[0],misfitArray_new[1],misfitArray_new[2]);
printf("step=%e rss=%e fg=%e misfit=%f minval=%f\n",step,rss, fitGoodness,misfit, minval);
nextra++;
n_prev = 3;
free(stepInArray_prev);
free(misfitArray_prev);
stepInArray_prev = stepInArray_new;
misfitArray_prev = misfitArray_new;
}
printf("\n\n\n");
free(p);
free(stepInArray);
free(misfitArray);
free(stepInArray_new);
free(misfitArray_new);
// apply the final chosen step (reverting the last trial)
return updateModels(step, step_prev);
}
// Top-level inversion driver: sets up the taper weights and Gaussian filter,
// then iterates kernel computation + line search for `niter` iterations.
// dat::optimization_method 1 selects Fletcher-Reeves and 2 Polak-Ribiere
// conjugate gradients (per the gam formulas below); any other value does
// plain steepest-descent iterations. Model/kernel snapshots are written to
// files after each CG iteration.
// NOTE(review): g is copied from dat::rho (the model), not dat::K_rho (the
// gradient), and dat::rho is later overwritten with the search direction h
// before the line search -- verify mat::copy argument order / intended arrays.
static void inversionRoutine(){
cublasCreate(&cublas_handle);
cusolverDnCreate(&solver_handle);
int niter = 20;
float step = 0.004;
float **lambda = mat::createHost(nx,nz);
float **mu = mat::createHost(nx,nz);
float **rho = mat::createHost(nx,nz);
{ // later
mat::copyDeviceToHost(rho, dat::rho, nx, nz);
mat::copyDeviceToHost(mu, dat::mu, nx, nz);
mat::copyDeviceToHost(lambda, dat::lambda, nx, nz);
mat::write(rho, nx, nz, "rho0");
mat::write(mu, nx, nz, "mu0");
mat::write(lambda, nx, nz, "lambda0");
}
// taper weights
dat::tw = mat::create(nt);
getTaperWeights<<<nt, 1>>>(dat::tw, dt, nt);
// gaussian filter
dat::sigma = 2;
dat::gsum = mat::create(nx, nz);
dat::gtemp = mat::create(nx, nz);
initialiseGaussian<<<nxb, nzt>>>(dat::gsum, nx, nz, dat::sigma);
// adjoint related parameters
dat::obs_type = 1;
dat::misfit_ref = -1;
dat::K_lambda_ref = -1;
dat::K_mu_ref = -1;
dat::K_rho_ref = -1;
int &opm = dat::optimization_method;
if(opm == 1 || opm == 2){
float **g = mat::create(nx, nz);
float **h = mat::create(nx, nz);
float **gpr = mat::create(nx, nz);
float **sch = mat::create(nx, nz);
float misfit = computeKernels(1);
mat::copy(g, dat::rho, nx, nz);
float ng = mat::amax(g, nx, nz);
float n2g = mat::norm(g, nx, nz);
float gg = n2g * n2g;
float gam = 0;
mat::init(h, nx, nz, 0);
float nh = 0;
// reduce cudaMalloc: later
for(int iter = 0; iter < niter; iter++){
printf("iter = %d\n", iter + 1);
float ggpr = gg;
mat::copy(gpr, g, nx, nz);
// CG update of the search direction: h = gam * h + g
mat::calc(h, gam, h, 1, g, nx, nz);
// restart with steepest descent when h is no longer a descent direction
if (mat::dot(g, h, nx, nz) <= 1e-3 * n2g * nh) {
mat::copy(h, g, nx, nz);
nh = n2g;
}
else{
nh = mat::norm(h, nx, nz);
}
mat::copy(dat::rho, h, nx, nz);
step = calculateStepLength(step, misfit, iter);
misfit = computeKernels(1);
mat::copy(g, dat::rho, nx, nz);
ng = mat::amax(g, nx, nz);
n2g = mat::norm(g, nx, nz);
gg = n2g * n2g;
if(opm == 1){
// Fletcher-Reeves: gam = g.g / g_prev.g_prev
gam = gg / ggpr;
}
else{
// Polak-Ribiere: gam = (g - g_prev).g / g_prev.g_prev
mat::calc(gpr, 1, g, -1, gpr, nx, nz);
gam = mat::dot(gpr, g, nx, nz) / ggpr;
}
{ // later
// NOTE(review): "klambda20" fills lname2[10] exactly; larger niter
// would overflow these buffers -- confirm sizing if niter changes.
char lname[10], mname[10], rname[10];
char lname2[10], mname2[10], rname2[10];
sprintf(lname, "lambda%d", iter + 1);
sprintf(lname2, "klambda%d", iter + 1);
sprintf(mname, "mu%d", iter + 1);
sprintf(mname2, "kmu%d", iter + 1);
sprintf(rname, "rho%d", iter + 1);
sprintf(rname2, "krho%d", iter + 1);
mat::copyDeviceToHost(rho, dat::rho, nx, nz);
mat::copyDeviceToHost(mu, dat::mu, nx, nz);
mat::copyDeviceToHost(lambda, dat::lambda, nx, nz);
mat::write(rho, nx, nz, rname);
mat::write(mu, nx, nz, mname);
mat::write(lambda, nx, nz, lname);
mat::copyDeviceToHost(rho, dat::K_rho, nx, nz);
mat::copyDeviceToHost(mu, dat::K_mu, nx, nz);
mat::copyDeviceToHost(lambda, dat::K_lambda, nx, nz);
mat::write(rho, nx, nz, rname2);
mat::write(mu, nx, nz, mname2);
mat::write(lambda, nx, nz, lname2);
}
}
}
else{
for(int iter = 0; iter < niter; iter++){
printf("iter = %d\n", iter + 1);
float misfit = computeKernels(1);
step = calculateStepLength(step, misfit, iter);
}
}
cublasDestroy(cublas_handle);
cusolverDnDestroy(solver_handle);
}
// Synthetic inversion test: generates "observed" data with the true model
// (model_type 13, dense central block), then inverts starting from the
// homogeneous background model (model_type 10) using Polak-Ribiere CG.
static void runSyntheticInvertion(){
checkArgs(1);
dat::obs_type = 2; // save displacement persouce
dat::model_type = 13; // true model: later
prepareSTF(); // dat::use_given_stf, sObsPerFreq: later
defineMaterialParameters(); // dat::use_given_model: later
dat::u_obs_x = mat::create(nsrc, nrec, nt);
dat::u_obs_z = mat::create(nsrc, nrec, nt);
for(int isrc = 0; isrc < nsrc; isrc++){
runForward(isrc);
}
// switch to the homogeneous starting model and reference values
dat::model_type = 10;
dat::rho_ref = 2600;
dat::mu_ref = 2.66e10;
dat::lambda_ref = 3.42e10;
dat::optimization_method = 2;
defineMaterialParameters();
inversionRoutine();
}
// Entry point: with no arguments, runs the synthetic inversion test;
// otherwise each "run_forward" argument triggers a standalone forward
// simulation (all sources active). Prints GPU memory usage before exit.
int main(int argc , char *argv[]){
importData();
if(argc == 1){
runSyntheticInvertion();
}
else{
for(int i = 1; i< argc; i++){
if(strcmp(argv[i],"run_forward") == 0){
checkArgs(0);
prepareSTF();
defineMaterialParameters();
runForward(-1);
}
}
}
checkMemoryUsage();
return 0;
}
|
d7fb15b76e22a27bd852ef76b1050a9340b882a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<embedding_table.hpp>
#include<logger.hpp>
using namespace s2t::sys;
// Copy one embedding row on the device: thread t copies element t of
// source row `srcIndex` to destination row `destIndex`.
// Assumes blockDim.x equals the embedding width (row length), so each
// thread moves exactly one float_t — no bounds check is performed yet.
__global__ void embed_copy(size_t destIndex, size_t srcIndex, float_t* src, float_t* dest)
{
    // int i = blockIdx.x*blockDim.x + threadIdx.x;
    //printf("Hello from block %d, thread %d %d\n", blockIdx.x, threadIdx.x, blockDim.x);
    // TBD:: put a check on length
    size_t gpu_srcIndex = srcIndex*blockDim.x + threadIdx.x;   // flat element offset into src
    size_t gpu_destIndex = destIndex*blockDim.x + threadIdx.x; // flat element offset into dest
    dest[gpu_destIndex] = src[gpu_srcIndex];
}
// Gather the embedding rows for the token sequence `seq` into `output`,
// which is reshaped to (seq.size() x embedding width). One kernel launch
// per token, each using a single block of row-width threads.
// NOTE(review): the cudnn handle is unused in this function — confirm it
// is kept only for interface symmetry.
void embedding_table::lookup(cudnnHandle_t& cudnn, const std::vector<size_t>& seq, gpu_float_array& output)
{
    output.reshape(seq.size(), d_table.shape[1]);
    // can we make a single kernel call ? (one launch per token is
    // launch-overhead bound for long sequences)
    for(size_t index = 0;index < seq.size();++index)
    {
        hipLaunchKernelGGL(( embed_copy), dim3(1), dim3(output.shape[1]), 0, 0, index, seq[index], d_table.ptr, output.ptr);
    }
} | d7fb15b76e22a27bd852ef76b1050a9340b882a3.cu | #include<embedding_table.hpp>
#include<logger.hpp>
using namespace s2t::sys;
// Copy one embedding row on the device: thread t copies element t of
// source row `srcIndex` to destination row `destIndex`.
// Assumes blockDim.x equals the embedding width (row length), so each
// thread moves exactly one float_t — no bounds check is performed yet.
__global__ void embed_copy(size_t destIndex, size_t srcIndex, float_t* src, float_t* dest)
{
    // int i = blockIdx.x*blockDim.x + threadIdx.x;
    //printf("Hello from block %d, thread %d %d\n", blockIdx.x, threadIdx.x, blockDim.x);
    // TBD:: put a check on length
    size_t gpu_srcIndex = srcIndex*blockDim.x + threadIdx.x;   // flat element offset into src
    size_t gpu_destIndex = destIndex*blockDim.x + threadIdx.x; // flat element offset into dest
    dest[gpu_destIndex] = src[gpu_srcIndex];
}
// Gather the embedding rows for the token sequence `seq` into `output`,
// which is reshaped to (seq.size() x embedding width). One kernel launch
// per token, each using a single block of row-width threads.
// NOTE(review): the cudnn handle is unused in this function — confirm it
// is kept only for interface symmetry.
void embedding_table::lookup(cudnnHandle_t& cudnn, const std::vector<size_t>& seq, gpu_float_array& output)
{
    output.reshape(seq.size(), d_table.shape[1]);
    // can we make a single kernel call ? (one launch per token is
    // launch-overhead bound for long sequences)
    for(size_t index = 0;index < seq.size();++index)
    {
        embed_copy<<<1, output.shape[1]>>>(index, seq[index], d_table.ptr, output.ptr);
    }
} |
e5c2c7eff261fea2cc456c0fe374e31a39002576.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
// Print a diagnostic and abort the process when a HIP API call reports
// failure; does nothing on hipSuccess. Use via the HANDLE_ERROR macro so
// the originating file/line are captured.
static void HandleError( hipError_t err, const char *file, int line )
{
    if (err == hipSuccess)
    {
        return;  // nothing to report
    }
    printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
const int N = 2048;
// CUDA Kernel for Vector Addition
// Element-wise vector addition, dev_c[i] = dev_a[i] + dev_b[i], using a
// grid-stride loop: each thread handles indices tid, tid+stride, ... so
// any launch configuration covers all N elements.
__global__ void Vector_Addition ( const int *dev_a , const int *dev_b , int *dev_c)
{
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < N; tid += stride)
    {
        dev_c[tid] = dev_a[tid] + dev_b[tid];
    }
}
/*
 * Host driver for the vector-addition demo: fill two host vectors,
 * run Vector_Addition on the device, copy the sums back and print them.
 */
int main (void)
{
    // Host arrays (N elements each).
    int Host_a[N], Host_b[N], Host_c[N];
    // Device arrays.
    int *dev_a , *dev_b, *dev_c ;
    // Allocate the memory on the GPU.
    HANDLE_ERROR ( hipMalloc((void **)&dev_a , N*sizeof(int) ) );
    HANDLE_ERROR ( hipMalloc((void **)&dev_b , N*sizeof(int) ) );
    HANDLE_ERROR ( hipMalloc((void **)&dev_c , N*sizeof(int) ) );
    // Fill the host arrays with test data on the CPU.
    for ( int i = 0; i <N ; i++ )
    {
        Host_a[i] = -i ;
        Host_b[i] = i*i ;
    }
    // Copy host arrays to device arrays.
    HANDLE_ERROR (hipMemcpy (dev_a , Host_a , N*sizeof(int) , hipMemcpyHostToDevice));
    HANDLE_ERROR (hipMemcpy (dev_b , Host_b , N*sizeof(int) , hipMemcpyHostToDevice));
    // Launch the kernel with enough 128-thread blocks to cover N elements.
    hipLaunchKernelGGL(( Vector_Addition) , dim3((N+127)/128), dim3(128) , 0, 0, dev_a , dev_b , dev_c ) ;
    // A kernel launch returns no status directly: check for launch
    // (configuration) errors here; execution errors surface at the next
    // synchronizing call, i.e. the blocking hipMemcpy below.
    HANDLE_ERROR ( hipGetLastError() );
    // Copy back to host array from device array (blocking copy, so it
    // also synchronizes with the kernel).
    HANDLE_ERROR (hipMemcpy(Host_c , dev_c , N*sizeof(int) , hipMemcpyDeviceToHost));
    // Display the result.
    for ( int i = 0; i<N; i++ )
        printf ("%d + %d = %d\n", Host_a[i] , Host_b[i] , Host_c[i] ) ;
    // Free the device array memory.
    hipFree (dev_a) ;
    hipFree (dev_b) ;
    hipFree (dev_c) ;
    // NOTE(review): system("pause") is a Windows console idiom and fails on
    // Linux hosts, where HIP typically runs — confirm it is intended.
    system("pause");
    return 0 ;
}
| e5c2c7eff261fea2cc456c0fe374e31a39002576.cu | #include <stdio.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
// Print a diagnostic and abort the process when a CUDA API call reports
// failure; does nothing on cudaSuccess. Use via the HANDLE_ERROR macro so
// the originating file/line are captured.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
    {
        return;  // nothing to report
    }
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
const int N = 2048;
// CUDA Kernel for Vector Addition
// Element-wise vector addition, dev_c[i] = dev_a[i] + dev_b[i], using a
// grid-stride loop: each thread handles indices tid, tid+stride, ... so
// any launch configuration covers all N elements.
__global__ void Vector_Addition ( const int *dev_a , const int *dev_b , int *dev_c)
{
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < N; tid += stride)
    {
        dev_c[tid] = dev_a[tid] + dev_b[tid];
    }
}
/*
 * Host driver for the vector-addition demo: fill two host vectors,
 * run Vector_Addition on the device, copy the sums back and print them.
 */
int main (void)
{
    // Host arrays (N elements each).
    int Host_a[N], Host_b[N], Host_c[N];
    // Device arrays.
    int *dev_a , *dev_b, *dev_c ;
    // Allocate the memory on the GPU.
    HANDLE_ERROR ( cudaMalloc((void **)&dev_a , N*sizeof(int) ) );
    HANDLE_ERROR ( cudaMalloc((void **)&dev_b , N*sizeof(int) ) );
    HANDLE_ERROR ( cudaMalloc((void **)&dev_c , N*sizeof(int) ) );
    // Fill the host arrays with test data on the CPU.
    for ( int i = 0; i <N ; i++ )
    {
        Host_a[i] = -i ;
        Host_b[i] = i*i ;
    }
    // Copy host arrays to device arrays.
    HANDLE_ERROR (cudaMemcpy (dev_a , Host_a , N*sizeof(int) , cudaMemcpyHostToDevice));
    HANDLE_ERROR (cudaMemcpy (dev_b , Host_b , N*sizeof(int) , cudaMemcpyHostToDevice));
    // Launch the kernel with enough 128-thread blocks to cover N elements.
    Vector_Addition <<< (N+127)/128, 128 >>> (dev_a , dev_b , dev_c ) ;
    // A kernel launch returns no status directly: check for launch
    // (configuration) errors here; execution errors surface at the next
    // synchronizing call, i.e. the blocking cudaMemcpy below.
    HANDLE_ERROR ( cudaGetLastError() );
    // Copy back to host array from device array (blocking copy, so it
    // also synchronizes with the kernel).
    HANDLE_ERROR (cudaMemcpy(Host_c , dev_c , N*sizeof(int) , cudaMemcpyDeviceToHost));
    // Display the result.
    for ( int i = 0; i<N; i++ )
        printf ("%d + %d = %d\n", Host_a[i] , Host_b[i] , Host_c[i] ) ;
    // Free the device array memory.
    cudaFree (dev_a) ;
    cudaFree (dev_b) ;
    cudaFree (dev_c) ;
    // NOTE(review): system("pause") is a Windows console idiom and fails on
    // Linux hosts — confirm it is intended.
    system("pause");
    return 0 ;
}
|
94bfb754ef81856e423b7360ae396aba966c1f3b.hip | // !!! This is a file automatically generated by hipify!!!
//
// KdTreeGPUsms.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSEARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* Copyright (c) 2015, Russell A. Brown
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSEARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @(#)kdTreeSingleThread.cc 1.61 04/13/15 */
/*
* The k-d tree was described by Jon Bentley in "Multidimensional Binary Search Trees
* Used for Associative Searching", CACM 18(9): 509-517, 1975. For k dimensions and
* n elements of data, a balanced k-d tree is built in O(kn log n) + O((k+1)n log n)
* time by first sorting the data in each of k dimensions, then building the k-d tree
* in a manner that preserves the order of the k sorts while recursively partitioning
* the data at each level of the k-d tree. No further sorting is necessary. Moreover,
* it is possible to replace the O((k+1)n log n) term with a O((k-1)n log n) term but
* this approach sacrifices the generality of building the k-d tree for points of any
* number of dimensions.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <math.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include "Gpu.h"
#include "KdNode.h"
//#if __cplusplus != 201103L
#if 0
#include <chrono>
#define TIMER_DECLARATION() \
auto startTime = std::chrono::high_resolution_clock::now(); \
auto endTime = <std::chrono::high_resolution_clock::now();
#define TIMER_START() \
startTime = std::chrono::high_resolution_clock::now(); // high_resolution_clock::is_steady
#define TIMER_STOP(__TIMED) \
endTime = std::chrono::high_resolution_clock::now(); \
__TIMED = (std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - startTime).count())/1000.0
#elif defined(MACH)
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
mach_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#else
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
clock_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#endif
Gpu *gpu;
/*
* The superKeyCompare method compares two sint arrays in all k dimensions,
* and uses the sorting or partition coordinate as the most significant dimension.
*
* calling parameters:
*
* a - a int*
* b - a int*
* p - the most significant dimension
* dim - the number of dimensions
*
* returns: +1, 0 or -1 as the result of comparing two sint arrays
*/
/*
 * Compare two tuples in all dim dimensions, cycling through the coordinates
 * starting at partition dimension p (the "super key" ordering). Returns the
 * first non-zero coordinate difference, or 0 when the tuples are equal in
 * every dimension.
 */
KdCoord KdNode::superKeyCompare(const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
    for (sint i = 0; i < dim; i++) {
        // Wrap the coordinate index around dim without a modulo.
        sint r = i + p;
        if (r >= dim) {
            r -= dim;
        }
        const KdCoord delta = a[r] - b[r];
        if (delta != 0) {
            return delta;   // first differing coordinate decides the order
        }
    }
    return 0;               // tuples are identical under this super key
}
/*
* Walk the k-d tree and check that the children of a node are in the correct branch of that node.
*
* calling parameters:
*
* dim - the number of dimensions
* depth - the depth in the k-d tree
*
* returns: a count of the number of kdNodes in the k-d tree
*/
sint KdNode::verifyKdTree( const KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
    sint count = 1 ;   // count this node
    // The partition cycles as x, y, z, w...
    sint axis = depth % dim;
    // Every node in the < subtree must compare strictly less than this node
    // under the super key for this level's axis; a violation means the tree
    // is malformed, so print a diagnostic and abort.
    if (ltChild != -1) {
        if (superKeyCompare(coords+kdNodes[ltChild].tuple*dim, coords+tuple*dim, axis, dim) >= 0) {
            cout << "At Depth " << depth << " LT child is > node on axis " << axis << "!" << endl;
            printTuple(coords+tuple*dim, dim);
            cout << " < [" << ltChild << "]";
            printTuple(coords+kdNodes[ltChild].tuple*dim, dim);
            cout << endl;
            exit(1);
        }
        count += kdNodes[ltChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
    }
    // Symmetric check for the > subtree: children must compare strictly greater.
    if (gtChild != -1) {
        if (superKeyCompare(coords+kdNodes[gtChild].tuple*dim, coords+tuple*dim, axis, dim) <= 0) {
            cout << "At Depth " << depth << " GT child is < node on axis " << axis << "!" << endl;
            printTuple(coords+tuple*dim, dim);
            cout << " > [" << gtChild << "]";
            printTuple(coords+kdNodes[gtChild].tuple*dim, dim);
            cout << endl;
            exit(1);
        }
        count += kdNodes[gtChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
    }
    return count;   // total number of nodes in this subtree, including self
}
/*
* The createKdTree function performs the necessary initialization then calls the buildKdTree function.
*
* calling parameters:
*
* coordinates - a vector<int*> of references to each of the (x, y, z, w...) tuples
* numDimensions - the number of dimensions
*
* returns: a KdNode pointer to the root of the k-d tree
*/
KdNode* KdNode::createKdTree(KdNode kdNodes[], KdCoord coordinates[], const sint numDimensions, const sint numTuples)
{
    // TIMER_* macros declare and fill the named double timing variables.
    TIMER_DECLARATION();
    TIMER_START();
    Gpu::initializeKdNodesArray(coordinates, numTuples, numDimensions);
    hipDeviceSynchronize();   // wait for initialization so the timing is meaningful
    TIMER_STOP (double initTime);
    // Sort the reference array using multiple threads if possible.
    TIMER_START();
    // NOTE(review): variable-length array — a compiler extension, not standard C++.
    sint end[numDimensions]; // Array used to collect results of the remove duplicates function
    Gpu::mergeSort(end, numTuples, numDimensions);
    TIMER_STOP (double sortTime);
    // Check that the same number of references was removed from each reference array.
    for (sint i = 0; i < numDimensions-1; i++) {
        if (end[i] < 0) {
            // A negative count signals a failed duplicate-removal pass.
            cout << "removeDuplicates failed on dimension " << i << endl;
            cout << end[0];
            for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
            cout << endl;
            exit(1);
        }
        for (sint j = i + 1; j < numDimensions; j++) {
            if ( end[i] != end[j] ) {
                // All dimensions must agree on how many duplicates were removed.
                cout << "Duplicate removal error" << endl;
                cout << end[0];
                for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
                cout << endl;
                exit(1);
            }
        }
    }
    cout << numTuples-end[0] << " equal nodes removed. "<< endl;
    // Build the k-d tree.
    TIMER_START();
    // refIdx_t root = gpu->startBuildKdTree(kdNodes, end[0], numDimensions);
    refIdx_t root = Gpu::buildKdTree(kdNodes, end[0], numDimensions);
    TIMER_STOP (double kdTime);
    // Verify the k-d tree and report the number of KdNodes.
    TIMER_START();
    sint numberOfNodes = Gpu::verifyKdTree(kdNodes, root, numDimensions, numTuples);
    // sint numberOfNodes = kdNodes[root].verifyKdTree( kdNodes, coordinates, numDimensions, 0);
    cout << "Number of nodes = " << numberOfNodes << endl;
    TIMER_STOP (double verifyTime);
    cout << "totalTime = " << fixed << setprecision(4) << initTime + sortTime + kdTime + verifyTime
         << " initTime = " << initTime << " sortTime + removeDuplicatesTime = " << sortTime
         << " kdTime = " << kdTime << " verifyTime = " << verifyTime << endl << endl;
    // Return the pointer to the root of the k-d tree.
    return &kdNodes[root];
}
/*
* Search the k-d tree and find the KdNodes that lie within a cutoff distance
* from a query node in all k dimensions.
*
* calling parameters:
*
* query - the query point
* cut - the cutoff distance
* dim - the number of dimensions
* depth - the depth in the k-d tree
*
* returns: a list that contains the kdNodes that lie within the cutoff distance of the query node
*/
// Returns (by value) a list of copies of the KdNodes that lie within `cut`
// of `query` in every dimension; subtree results are concatenated with
// list::splice, which moves nodes without copying them again.
list<KdNode> KdNode::searchKdTree(const KdNode kdNodes[], const KdCoord coords[], const KdCoord* query, const KdCoord cut,
                                  const sint dim, const sint depth) const {
    // The partition cycles as x, y, z, w...
    sint axis = depth % dim;
    // If the distance from the query node to the k-d node is within the cutoff distance
    // in all k dimensions, add the k-d node to a list.
    list<KdNode> result;
    bool inside = true;
    for (sint i = 0; i < dim; i++) {
        if (abs(query[i] - coords[tuple*dim+i]) > cut) {
            inside = false;
            break;
        }
    }
    if (inside) {
        result.push_back(*this); // The push_back function expects a KdNode for a call by reference.
    }
    // Search the < branch of the k-d tree if the partition coordinate of the query point minus
    // the cutoff distance is <= the partition coordinate of the k-d node. The < branch must be
    // searched when the cutoff distance equals the partition coordinate because the super key
    // may assign a point to either branch of the tree if the sorting or partition coordinate,
    // which forms the most significant portion of the super key, shows equality.
    if ( ltChild != -1 && (query[axis] - cut) <= coords[tuple*dim+axis] ) {
        list<KdNode> ltResult = kdNodes[ltChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
        result.splice(result.end(), ltResult); // Can't substitute searchKdTree(...) for ltResult.
    }
    // Search the > branch of the k-d tree if the partition coordinate of the query point plus
    // the cutoff distance is >= the partition coordinate of the k-d node. The > branch must be
    // searched when the cutoff distance equals the partition coordinate because the super key
    // may assign a point to either branch of the tree if the sorting or partition coordinate,
    // which forms the most significant portion of the super key, shows equality.
    if ( gtChild != -1 && (query[axis] + cut) >= coords[tuple*dim+axis] ) {
        list<KdNode> gtResult = kdNodes[gtChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
        result.splice(result.end(), gtResult); // Can't substitute searchKdTree(...) for gtResult.
    }
    return result;
}
/*
* Print one tuple.
*
* calling parameters:
*
* tuple - the tuple to print
* dim - the number of dimensions
*/
/*
 * Print one tuple as "(c0,c1,...,c{dim-1})".
 *
 * calling parameters:
 *
 * tuple - pointer to the first of dim coordinates
 * dim - the number of dimensions
 */
void KdNode::printTuple(const KdCoord* tuple, const sint dim)
{
    // Bug fix: the first coordinate is tuple[0]; the previous tuple[dim]
    // read one element past the end of this tuple (i.e. the first
    // coordinate of the *next* tuple in a packed array).
    cout << "(" << tuple[0] << ",";
    for (sint i=1; i<dim-1; i++) cout << tuple[i] << ",";
    cout << tuple[dim-1] << ")";
}
/*
* Print the k-d tree "sideways" with the root at the ltChild.
*
* calling parameters:
*
* dim - the number of dimensions
* depth - the depth in the k-d tree
*/
// Reverse in-order traversal: the > subtree is printed first so the tree
// reads "sideways" with the root at the left and depth shown as indentation.
void KdNode::printKdTree(KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
    if (gtChild != -1) {
        kdNodes[gtChild].printKdTree(kdNodes, coords, dim, depth+1);
    }
    // One indentation unit per tree level.
    for (sint i=0; i<depth; i++) cout << " ";
    printTuple(coords+tuple*dim, dim);
    cout << endl;
    if (ltChild != -1) {
        kdNodes[ltChild].printKdTree(kdNodes, coords, dim, depth+1);
    }
}
/* Create a simple k-d tree and print its topology for inspection. */
/*
 * Create a simple k-d tree and print statistics (and optionally search
 * results) for inspection.
 *
 * Command-line options: -n/--numPoints, -x/--extraPoints, -d/--numDimensions,
 * -t/--numThreads, -b/--numBlocks, -s/--searchDistance, -p/--maximumNodesToPrint.
 */
sint main(sint argc, char **argv)
{
    // Set the defaults then parse the input arguments.
    sint numPoints = 4194304;
    sint extraPoints = 100;
    sint numDimensions = 3;
    sint numThreads = 512;
    sint numBlocks = 32;
    sint searchDistance = 20000000;
    sint maximumNumberOfNodesToPrint = 5;
    for (sint i = 1; i < argc; i++) {
        if ( 0 == strcmp(argv[i], "-n") || 0 == strcmp(argv[i], "--numPoints") ) {
            numPoints = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-x") || 0 == strcmp(argv[i], "--extraPoints") ) {
            extraPoints = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-d") || 0 == strcmp(argv[i], "--numDimensions") ) {
            numDimensions = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-t") || 0 == strcmp(argv[i], "--numThreads") ) {
            numThreads = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-b") || 0 == strcmp(argv[i], "--numBlocks") ) {
            numBlocks = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-s") || 0 == strcmp(argv[i], "--searchDistance") ) {
            searchDistance = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-p") || 0 == strcmp(argv[i], "--maximumNodesToPrint") ) {
            maximumNumberOfNodesToPrint = atol(argv[++i]);
            continue;
        }
        cout << "Unsupported command-line argument: " << argv[i] << endl;
        exit(1);
    }
    // (Removed a dead "sint i = maximumNumberOfNodesToPrint + numDimensions
    // + extraPoints;" statement — its value was never read; the loops below
    // declare their own indices.)
    // Declare the two-dimensional coordinates array that contains (x,y,z) coordinates.
    /*
    sint coordinates[NUM_TUPLES][DIMENSIONS] = {
    {2,3,3}, {5,4,2}, {9,6,7}, {4,7,9}, {8,1,5},
    {7,2,6}, {9,4,1}, {8,4,2}, {9,7,8}, {6,3,1},
    {3,4,5}, {1,6,8}, {9,5,3}, {2,1,3}, {8,7,6},
    {5,4,2}, {6,3,1}, {8,7,6}, {9,6,7}, {2,1,3},
    {7,2,6}, {4,7,9}, {1,6,8}, {3,4,5}, {9,4,1} };
    */
    // gpu = new Gpu(numThreads,numBlocks,0,numDimensions);
    Gpu::gpuSetup(2, numThreads,numBlocks,numDimensions);
    if (Gpu::getNumThreads() == 0 || Gpu::getNumBlocks() == 0) {
        cout << "KdNode Tree cannot be built with " << numThreads << " threads or " << numBlocks << " blocks." << endl;
        exit(1);
    }
    cout << "Points = " << numPoints << " dimensions = " << numDimensions << ", threads = " << numThreads << ", blocks = " << numBlocks << endl;
    srand(0);  // fixed seed so the generated data is reproducible
    KdCoord (*coordinates) = new KdCoord[numPoints*numDimensions];
    for (sint i = 0; i<numPoints; i++) {
        for (sint j=0; j<numDimensions; j++) {
            coordinates[i*numDimensions+j] = (KdCoord)rand();
            //coordinates[i*numDimensions+j] = (j==1)? (numPoints-i) : i;
            //coordinates[i*numDimensions+j] = i;
        }
    }
    // Create the k-d tree. First copy the data to a tuple in its kdNode.
    // also null out the gt and lt references
    // create and initialize the kdNodes array
    KdNode *kdNodes = new KdNode[numPoints];
    // NOTE(review): plain operator new throws std::bad_alloc rather than
    // returning NULL, so this check can never fire; kept for safety.
    if (kdNodes == NULL) {
        printf("Can't allocate %d kdNodes\n", numPoints);
        exit (1);
    }
    KdNode *root = KdNode::createKdTree(kdNodes, coordinates, numDimensions, numPoints);
    // Print the k-d tree "sideways" with the root at the left.
    cout << endl;
    if (searchDistance == 0){
        // Fix: release host allocations before the early exit.
        delete[] kdNodes;
        delete[] coordinates;
        return 0;
    }
    TIMER_DECLARATION();
    // Search the k-d tree for the k-d nodes that lie within the cutoff distance of the first tuple.
    KdCoord* query = (KdCoord *)malloc(numDimensions * sizeof(KdCoord));
    for (sint i = 0; i < numDimensions; i++) {
        query[i] = coordinates[i];
    }
    // read the KdTree back from GPU
    Gpu::getKdTreeResults( kdNodes, coordinates, numPoints, numDimensions);
#define VERIFY_ON_HOST
#ifdef VERIFY_ON_HOST
    sint numberOfNodes = root->verifyKdTree( kdNodes, coordinates, numDimensions, 0);
    cout << "Number of nodes on host = " << numberOfNodes << endl;
#endif
    TIMER_START();
    list<KdNode> kdList = root->searchKdTree(kdNodes, coordinates, query, searchDistance, numDimensions, 0);
    TIMER_STOP(double searchTime);
    cout << "searchTime = " << fixed << setprecision(2) << searchTime << " seconds" << endl << endl;
    cout << endl << kdList.size() << " nodes within " << searchDistance << " units of ";
    KdNode::printTuple(query, numDimensions);
    cout << " in all dimensions." << endl << endl;
    if (kdList.size() != 0) {
        cout << "List of k-d nodes within " << searchDistance << "-unit search distance follows:" << endl << endl;
        list<KdNode>::iterator it;
        for (it = kdList.begin(); it != kdList.end(); it++) {
            KdNode::printTuple(coordinates+it->getTuple()*numDimensions, numDimensions);
            cout << " ";
        }
        cout << endl;
    }
    // Fix: release host allocations (these previously leaked until exit).
    free(query);
    delete[] kdNodes;
    delete[] coordinates;
    return 0;
}
| 94bfb754ef81856e423b7360ae396aba966c1f3b.cu | //
// KdTreeGPUsms.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSEARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* Copyright (c) 2015, Russell A. Brown
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSEARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @(#)kdTreeSingleThread.cc 1.61 04/13/15 */
/*
* The k-d tree was described by Jon Bentley in "Multidimensional Binary Search Trees
* Used for Associative Searching", CACM 18(9): 509-517, 1975. For k dimensions and
* n elements of data, a balanced k-d tree is built in O(kn log n) + O((k+1)n log n)
* time by first sorting the data in each of k dimensions, then building the k-d tree
* in a manner that preserves the order of the k sorts while recursively partitioning
* the data at each level of the k-d tree. No further sorting is necessary. Moreover,
* it is possible to replace the O((k+1)n log n) term with a O((k-1)n log n) term but
* this approach sacrifices the generality of building the k-d tree for points of any
* number of dimensions.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <math.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include "Gpu.h"
#include "KdNode.h"
//#if __cplusplus != 201103L
#if 0
#include <chrono>
#define TIMER_DECLARATION() \
auto startTime = std::chrono::high_resolution_clock::now(); \
auto endTime = <std::chrono::high_resolution_clock::now();
#define TIMER_START() \
startTime = std::chrono::high_resolution_clock::now(); // high_resolution_clock::is_steady
#define TIMER_STOP(__TIMED) \
endTime = std::chrono::high_resolution_clock::now(); \
__TIMED = (std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - startTime).count())/1000.0
#elif defined(MACH)
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
mach_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#else
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
clock_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#endif
Gpu *gpu;
/*
* The superKeyCompare method compares two sint arrays in all k dimensions,
* and uses the sorting or partition coordinate as the most significant dimension.
*
* calling parameters:
*
* a - a int*
* b - a int*
* p - the most significant dimension
* dim - the number of dimensions
*
* returns: +1, 0 or -1 as the result of comparing two sint arrays
*/
/*
 * Compare two tuples in all dim dimensions, cycling through the coordinates
 * starting at partition dimension p (the "super key" ordering). Returns the
 * first non-zero coordinate difference, or 0 when the tuples are equal in
 * every dimension.
 */
KdCoord KdNode::superKeyCompare(const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
    for (sint i = 0; i < dim; i++) {
        // Wrap the coordinate index around dim without a modulo.
        sint r = i + p;
        if (r >= dim) {
            r -= dim;
        }
        const KdCoord delta = a[r] - b[r];
        if (delta != 0) {
            return delta;   // first differing coordinate decides the order
        }
    }
    return 0;               // tuples are identical under this super key
}
/*
* Walk the k-d tree and check that the children of a node are in the correct branch of that node.
*
* calling parameters:
*
* dim - the number of dimensions
* depth - the depth in the k-d tree
*
* returns: a count of the number of kdNodes in the k-d tree
*/
sint KdNode::verifyKdTree( const KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
    sint count = 1 ;   // count this node
    // The partition cycles as x, y, z, w...
    sint axis = depth % dim;
    // Every node in the < subtree must compare strictly less than this node
    // under the super key for this level's axis; a violation means the tree
    // is malformed, so print a diagnostic and abort.
    if (ltChild != -1) {
        if (superKeyCompare(coords+kdNodes[ltChild].tuple*dim, coords+tuple*dim, axis, dim) >= 0) {
            cout << "At Depth " << depth << " LT child is > node on axis " << axis << "!" << endl;
            printTuple(coords+tuple*dim, dim);
            cout << " < [" << ltChild << "]";
            printTuple(coords+kdNodes[ltChild].tuple*dim, dim);
            cout << endl;
            exit(1);
        }
        count += kdNodes[ltChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
    }
    // Symmetric check for the > subtree: children must compare strictly greater.
    if (gtChild != -1) {
        if (superKeyCompare(coords+kdNodes[gtChild].tuple*dim, coords+tuple*dim, axis, dim) <= 0) {
            cout << "At Depth " << depth << " GT child is < node on axis " << axis << "!" << endl;
            printTuple(coords+tuple*dim, dim);
            cout << " > [" << gtChild << "]";
            printTuple(coords+kdNodes[gtChild].tuple*dim, dim);
            cout << endl;
            exit(1);
        }
        count += kdNodes[gtChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
    }
    return count;   // total number of nodes in this subtree, including self
}
/*
* The createKdTree function performs the necessary initialization then calls the buildKdTree function.
*
* calling parameters:
*
* coordinates - a vector<int*> of references to each of the (x, y, z, w...) tuples
* numDimensions - the number of dimensions
*
* returns: a KdNode pointer to the root of the k-d tree
*/
KdNode* KdNode::createKdTree(KdNode kdNodes[], KdCoord coordinates[], const sint numDimensions, const sint numTuples)
{
    // TIMER_* macros declare and fill the named double timing variables.
    TIMER_DECLARATION();
    TIMER_START();
    Gpu::initializeKdNodesArray(coordinates, numTuples, numDimensions);
    cudaDeviceSynchronize();   // wait for initialization so the timing is meaningful
    TIMER_STOP (double initTime);
    // Sort the reference array using multiple threads if possible.
    TIMER_START();
    // NOTE(review): variable-length array — a compiler extension, not standard C++.
    sint end[numDimensions]; // Array used to collect results of the remove duplicates function
    Gpu::mergeSort(end, numTuples, numDimensions);
    TIMER_STOP (double sortTime);
    // Check that the same number of references was removed from each reference array.
    for (sint i = 0; i < numDimensions-1; i++) {
        if (end[i] < 0) {
            // A negative count signals a failed duplicate-removal pass.
            cout << "removeDuplicates failed on dimension " << i << endl;
            cout << end[0];
            for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
            cout << endl;
            exit(1);
        }
        for (sint j = i + 1; j < numDimensions; j++) {
            if ( end[i] != end[j] ) {
                // All dimensions must agree on how many duplicates were removed.
                cout << "Duplicate removal error" << endl;
                cout << end[0];
                for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
                cout << endl;
                exit(1);
            }
        }
    }
    cout << numTuples-end[0] << " equal nodes removed. "<< endl;
    // Build the k-d tree.
    TIMER_START();
    // refIdx_t root = gpu->startBuildKdTree(kdNodes, end[0], numDimensions);
    refIdx_t root = Gpu::buildKdTree(kdNodes, end[0], numDimensions);
    TIMER_STOP (double kdTime);
    // Verify the k-d tree and report the number of KdNodes.
    TIMER_START();
    sint numberOfNodes = Gpu::verifyKdTree(kdNodes, root, numDimensions, numTuples);
    // sint numberOfNodes = kdNodes[root].verifyKdTree( kdNodes, coordinates, numDimensions, 0);
    cout << "Number of nodes = " << numberOfNodes << endl;
    TIMER_STOP (double verifyTime);
    cout << "totalTime = " << fixed << setprecision(4) << initTime + sortTime + kdTime + verifyTime
         << " initTime = " << initTime << " sortTime + removeDuplicatesTime = " << sortTime
         << " kdTime = " << kdTime << " verifyTime = " << verifyTime << endl << endl;
    // Return the pointer to the root of the k-d tree.
    return &kdNodes[root];
}
/*
 * Search the k-d tree and collect every KdNode whose coordinates differ
 * from a query point by at most the cutoff distance in all k dimensions.
 *
 * calling parameters:
 *
 * kdNodes - the array of KdNodes that forms the tree
 * coords - the coordinate array indexed via each node's tuple field
 * query - the query point
 * cut - the cutoff distance
 * dim - the number of dimensions
 * depth - the depth in the k-d tree
 *
 * returns: a list that contains the kdNodes that lie within the cutoff distance of the query node
 */
list<KdNode> KdNode::searchKdTree(const KdNode kdNodes[], const KdCoord coords[], const KdCoord* query, const KdCoord cut,
const sint dim, const sint depth) const {

    list<KdNode> found;

    // The splitting axis cycles x, y, z, w... with tree depth.
    const sint axis = depth % dim;

    // This node belongs in the result when the query differs from it by at
    // most `cut` in every dimension.
    sint d = 0;
    while (d < dim && abs(query[d] - coords[tuple*dim+d]) <= cut) {
        ++d;
    }
    if (d == dim) {
        found.push_back(*this); // push_back copies this node into the list.
    }

    // Descend into the < subtree when (query[axis] - cut) does not exceed this
    // node's partition coordinate. Equality must also recurse because the
    // super key may have assigned points with an equal partition coordinate
    // to either branch of the tree.
    if (ltChild != -1 && (query[axis] - cut) <= coords[tuple*dim+axis]) {
        list<KdNode> sub = kdNodes[ltChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
        found.splice(found.end(), sub);
    }

    // Symmetric descent into the > subtree when (query[axis] + cut) reaches
    // this node's partition coordinate.
    if (gtChild != -1 && (query[axis] + cut) >= coords[tuple*dim+axis]) {
        list<KdNode> sub = kdNodes[gtChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
        found.splice(found.end(), sub);
    }

    return found;
}
/*
 * Print one tuple in the form (t0,t1,...,t_{dim-1}).
 *
 * calling parameters:
 *
 * tuple - the tuple to print (an array of dim coordinates)
 * dim - the number of dimensions
 */
void KdNode::printTuple(const KdCoord* tuple, const sint dim)
{
    // Bug fix: the first coordinate is tuple[0]; the previous code printed
    // tuple[dim], which reads one element past the end of the tuple.
    cout << "(" << tuple[0] << ",";
    for (sint i=1; i<dim-1; i++) cout << tuple[i] << ",";
    cout << tuple[dim-1] << ")";
}
/*
 * Print the k-d tree "sideways" with the root at the ltChild.
 *
 * calling parameters:
 *
 * kdNodes - the array of KdNodes that forms the tree
 * coords - the coordinate array indexed via each node's tuple field
 * dim - the number of dimensions
 * depth - the depth in the k-d tree (controls the indentation)
 */
void KdNode::printKdTree(KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
    // The > subtree is printed above this node in the sideways rendering.
    if (gtChild != -1) {
        kdNodes[gtChild].printKdTree(kdNodes, coords, dim, depth+1);
    }
    // Indent in proportion to the depth, then print this node's tuple.
    for (sint i=0; i<depth; i++) cout << "       ";
    printTuple(coords+tuple*dim, dim);
    cout << endl;
    // The < subtree is printed below this node.
    if (ltChild != -1) {
        kdNodes[ltChild].printKdTree(kdNodes, coords, dim, depth+1);
    }
}
/* Create a simple k-d tree and print its topology for inspection. */
sint main(sint argc, char **argv)
{
    // Set the defaults then parse the input arguments.
    sint numPoints = 4194304;
    sint extraPoints = 100;
    sint numDimensions = 3;
    sint numThreads = 512;
    sint numBlocks = 32;
    sint searchDistance = 20000000;
    sint maximumNumberOfNodesToPrint = 5;

    // Command-line options: -n/--numPoints, -x/--extraPoints,
    // -d/--numDimensions, -t/--numThreads, -b/--numBlocks,
    // -s/--searchDistance, -p/--maximumNodesToPrint.
    for (sint i = 1; i < argc; i++) {
        if ( 0 == strcmp(argv[i], "-n") || 0 == strcmp(argv[i], "--numPoints") ) {
            numPoints = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-x") || 0 == strcmp(argv[i], "--extraPoints") ) {
            extraPoints = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-d") || 0 == strcmp(argv[i], "--numDimensions") ) {
            numDimensions = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-t") || 0 == strcmp(argv[i], "--numThreads") ) {
            numThreads = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-b") || 0 == strcmp(argv[i], "--numBlocks") ) {
            numBlocks = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-s") || 0 == strcmp(argv[i], "--searchDistance") ) {
            searchDistance = atol(argv[++i]);
            continue;
        }
        if ( 0 == strcmp(argv[i], "-p") || 0 == strcmp(argv[i], "--maximumNodesToPrint") ) {
            maximumNumberOfNodesToPrint = atol(argv[++i]);
            continue;
        }
        cout << "Unsupported command-line argument: " << argv[i] << endl;
        exit(1);
    }

    // NOTE(review): dead store - `i` is reassigned by the coordinate fill
    // loop below before this value is ever read.
    sint i = maximumNumberOfNodesToPrint + numDimensions + extraPoints;

    // Declare the two-dimensional coordinates array that contains (x,y,z) coordinates.
    /*
    sint coordinates[NUM_TUPLES][DIMENSIONS] = {
    {2,3,3}, {5,4,2}, {9,6,7}, {4,7,9}, {8,1,5},
    {7,2,6}, {9,4,1}, {8,4,2}, {9,7,8}, {6,3,1},
    {3,4,5}, {1,6,8}, {9,5,3}, {2,1,3}, {8,7,6},
    {5,4,2}, {6,3,1}, {8,7,6}, {9,6,7}, {2,1,3},
    {7,2,6}, {4,7,9}, {1,6,8}, {3,4,5}, {9,4,1} };
    */

    // Initialize the GPU side with the requested launch configuration.
    // gpu = new Gpu(numThreads,numBlocks,0,numDimensions);
    Gpu::gpuSetup(2, numThreads,numBlocks,numDimensions);
    if (Gpu::getNumThreads() == 0 || Gpu::getNumBlocks() == 0) {
        cout << "KdNode Tree cannot be built with " << numThreads << " threads or " << numBlocks << " blocks." << endl;
        exit(1);
    }
    cout << "Points = " << numPoints << " dimensions = " << numDimensions << ", threads = " << numThreads << ", blocks = " << numBlocks << endl;

    // Generate deterministic pseudo-random coordinates (fixed seed 0).
    srand(0);
    KdCoord (*coordinates) = new KdCoord[numPoints*numDimensions];
    for ( i = 0; i<numPoints; i++) {
        for (sint j=0; j<numDimensions; j++) {
            coordinates[i*numDimensions+j] = (KdCoord)rand();
            //coordinates[i*numDimensions+j] = (j==1)? (numPoints-i) : i;
            //coordinates[i*numDimensions+j] = i;
        }
    }

    // Create the k-d tree. First copy the data to a tuple in its kdNode.
    // also null out the gt and lt references
    // create and initialize the kdNodes array
    KdNode *kdNodes = new KdNode[numPoints];
    if (kdNodes == NULL) {
        printf("Can't allocate %d kdNodes\n", numPoints);
        exit (1);
    }
    KdNode *root = KdNode::createKdTree(kdNodes, coordinates, numDimensions, numPoints);

    // Print the k-d tree "sideways" with the root at the left.
    cout << endl;

    // A search distance of zero means "build only"; skip the query phase.
    if (searchDistance == 0){
        return 0;
    }

    TIMER_DECLARATION();

    // Search the k-d tree for the k-d nodes that lie within the cutoff distance of the first tuple.
    KdCoord* query = (KdCoord *)malloc(numDimensions * sizeof(KdCoord));
    for (sint i = 0; i < numDimensions; i++) {
        query[i] = coordinates[i];
    }

    // read the KdTree back from GPU
    Gpu::getKdTreeResults( kdNodes, coordinates, numPoints, numDimensions);

#define VERIFY_ON_HOST
#ifdef VERIFY_ON_HOST
    // Cross-check the GPU-built tree with a host-side verification pass.
    sint numberOfNodes = root->verifyKdTree( kdNodes, coordinates, numDimensions, 0);
    cout << "Number of nodes on host = " << numberOfNodes << endl;
#endif

    TIMER_START();
    list<KdNode> kdList = root->searchKdTree(kdNodes, coordinates, query, searchDistance, numDimensions, 0);
    TIMER_STOP(double searchTime);

    cout << "searchTime = " << fixed << setprecision(2) << searchTime << " seconds" << endl << endl;

    // Report the matches found within the cutoff distance.
    cout << endl << kdList.size() << " nodes within " << searchDistance << " units of ";
    KdNode::printTuple(query, numDimensions);
    cout << " in all dimensions." << endl << endl;
    if (kdList.size() != 0) {
        cout << "List of k-d nodes within " << searchDistance << "-unit search distance follows:" << endl << endl;
        list<KdNode>::iterator it;
        for (it = kdList.begin(); it != kdList.end(); it++) {
            KdNode::printTuple(coordinates+it->getTuple()*numDimensions, numDimensions);
            cout << " ";
        }
        cout << endl;
    }
    return 0;
}
|
70b1a10562995279c48b9869cc31a7fac5b08f42.hip | // !!! This is a file automatically generated by hipify!!!
// clang-format off
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../../graph_parser/parse.h"
#include "../../graph_parser/util.h"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
/**
 * Connected-components driver (GraphChi-style vertex program on the GPU,
 * HIP port).
 *
 * Usage: <exe> <graph file> <file format>
 *   file format: 0 = DIMACS9, 1 = Metis, 2 = Matrix Market
 *
 * Parses the graph into CSR form (out-edges and in-edges), uploads it,
 * runs ITER iterations of the ConnectedComponent kernel, copies the
 * per-node component ids back, and writes them to "result.out".
 */
int main(int argc, char **argv) {
    char *tmpchar;

    int num_nodes;
    int num_edges;
    int file_format = 1;
    bool directed = 0;

    hipError_t err = hipSuccess;

    // Parse the command line: <graph file> <format>.
    if (argc == 3) {
        tmpchar = argv[1];           // Graph inputfile
        file_format = atoi(argv[2]); // File format
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }

    // Allocate the csr structure
    csr_array *csr;

    // Parse graph files into csr structure
    if (file_format == 1) {
        // Metis
        csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 0) {
        // Dimacs9
        csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 2) {
        // Matrix market
        csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
    } else {
        printf("reserve for future");
        exit(1);
    }

    // Allocate rank_array (per-node component id on the host)
    int *rank_array = (int *)malloc(num_nodes * sizeof(int));
    if (!rank_array) {
        fprintf(stderr, "rank array not allocated successfully\n");
        return -1;
    }

    int *row_d;
    int *col_d;
    int *inrow_d;
    int *incol_d;
    int *cc_d;

    // Create device-side buffers for the graph
    err = hipMalloc(&row_d, num_nodes * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
                hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&col_d, num_edges * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
                hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n",
                num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&incol_d, num_edges * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n",
                num_edges, hipGetErrorString(err));
        return -1;
    }

    // Create buffers for cc
    err = hipMalloc(&cc_d, num_nodes * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc cc_d (size:%d) => %s\n", num_nodes,
                hipGetErrorString(err));
        return -1;
    }

    double timer1 = gettime();

    // Copy the data to the device-side buffers.
    // Bug fix: two messages contained a stray "#endif", and the col_d/incol_d
    // messages reported num_nodes although num_edges elements are copied.
    err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
                    hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy row_d (size:%d) => %s\n",
                num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
                    hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_edges,
                hipGetErrorString(err));
        return -1;
    }
    err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
                    hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy inrow_d (size:%d) => %s\n",
                num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
                    hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n",
                num_edges, hipGetErrorString(err));
        return -1;
    }

    // Set up work dimensions: one thread per node.
    int block_size = 256;
    int num_blocks = (num_nodes + block_size - 1) / block_size;
    dim3 threads(block_size, 1, 1);
    dim3 grid(num_blocks, 1, 1);

    // Vertex objects are created with in-kernel new; enlarge the device heap.
    hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);

    double timer3 = gettime();

    ChiVertex<int, int> **vertex;
    GraphChiContext *context;
    err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
    if (err != hipSuccess) {
        // Bug fix: report the allocated element count (num_nodes), not num_edges.
        fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_nodes,
                hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&context, sizeof(GraphChiContext));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
                (int)sizeof(GraphChiContext), hipGetErrorString(err));
        return -1;
    }

    // Initialize the GraphChi context (single thread is sufficient).
    printf("Start initCtx\n");
    hipLaunchKernelGGL(initContext, dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges);
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: initCtx failed (%s)\n",
                hipGetErrorString(err));
        return -1;
    }

    // Construct the per-node vertex objects from the CSR arrays.
    printf("Start initObj\n");
    hipLaunchKernelGGL(initObject, dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d,
                       inrow_d, incol_d);
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: initObject failed (%s)\n",
                hipGetErrorString(err));
        return -1;
    }

    printf("Start initOutEdge\n");
    hipLaunchKernelGGL(initOutEdge, dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
                hipGetErrorString(err));
        return -1;
    }

    // Run CC for a fixed number of iterations. TODO: convergence determination
    for (int i = 0; i < ITER; i++) {
        printf("Start ConnectedComponent\n");
        hipLaunchKernelGGL(ConnectedComponent, dim3(grid), dim3(threads), 0, 0, vertex, context, i);
        printf("Finish ConnectedComponent\n");
        hipDeviceSynchronize();
        err = hipGetLastError();
        if (err != hipSuccess) {
            fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
                    hipGetErrorString(err));
            return -1;
        }
    }
    hipDeviceSynchronize();

    double timer4 = gettime();

    // Gather the per-vertex component ids into the flat cc_d buffer.
    printf("Start Copyback\n");
    hipLaunchKernelGGL(copyBack, dim3(grid), dim3(threads), 0, 0, vertex, context, cc_d);
    printf("End Copyback\n");
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
                hipGetErrorString(err));
        return -1;
    }

    // Copy the rank buffer back
    err = hipMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
                    hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
                hipGetErrorString(err));
        return -1;
    }

    double timer2 = gettime();

    // Report timing characteristics
    printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
    printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);

#if 1
    // Print rank array
    print_vectorf(rank_array, num_nodes);
#endif

    // Free the host-side arrays
    free(rank_array);
    csr->freeArrays();
    free(csr);

    // Free the device buffers (vertex/context were previously leaked)
    hipFree(row_d);
    hipFree(col_d);
    hipFree(inrow_d);
    hipFree(incol_d);
    hipFree(cc_d);
    hipFree(vertex);
    hipFree(context);
    return 0;
}
/**
 * Dump an integer vector to "result.out", one element per line.
 *
 * @param vector  array of num integers to write
 * @param num     number of elements in vector
 */
void print_vectorf(int *vector, int num) {
    FILE *fp = fopen("result.out", "w");
    if (!fp) {
        // Bug fix: previously the message named result.txt and execution
        // fell through to fprintf/fclose on a NULL stream.
        printf("ERROR: unable to open result.out\n");
        return;
    }
    for (int i = 0; i < num; i++) {
        fprintf(fp, "%d\n", vector[i]);
    }
    fclose(fp);
}
| 70b1a10562995279c48b9869cc31a7fac5b08f42.cu | // clang-format off
/************************************************************************************\
* *
* Copyright (c) 2014 Advanced Micro Devices, Inc.                                  *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../../graph_parser/parse.h"
#include "../../graph_parser/util.h"
#include "kernel.cu"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
/**
 * Connected-components driver (GraphChi-style vertex program on the GPU).
 *
 * Usage: <exe> <graph file> <file format>
 *   file format: 0 = DIMACS9, 1 = Metis, 2 = Matrix Market
 *
 * Parses the graph into CSR form (out-edges and in-edges), uploads it,
 * runs ITER iterations of the ConnectedComponent kernel, copies the
 * per-node component ids back, and writes them to "result.out".
 */
int main(int argc, char **argv) {
    char *tmpchar;

    int num_nodes;
    int num_edges;
    int file_format = 1;
    bool directed = 0;

    cudaError_t err = cudaSuccess;

    // Parse the command line: <graph file> <format>.
    if (argc == 3) {
        tmpchar = argv[1];           // Graph inputfile
        file_format = atoi(argv[2]); // File format
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }

    // Allocate the csr structure
    csr_array *csr;

    // Parse graph files into csr structure
    if (file_format == 1) {
        // Metis
        csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 0) {
        // Dimacs9
        csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 2) {
        // Matrix market
        csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
    } else {
        printf("reserve for future");
        exit(1);
    }

    // Allocate rank_array (per-node component id on the host)
    int *rank_array = (int *)malloc(num_nodes * sizeof(int));
    if (!rank_array) {
        fprintf(stderr, "rank array not allocated successfully\n");
        return -1;
    }

    int *row_d;
    int *col_d;
    int *inrow_d;
    int *incol_d;
    int *cc_d;

    // Create device-side buffers for the graph
    err = cudaMalloc(&row_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
                cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&col_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
                cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n",
                num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&incol_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n",
                num_edges, cudaGetErrorString(err));
        return -1;
    }

    // Create buffers for cc
    err = cudaMalloc(&cc_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc cc_d (size:%d) => %s\n", num_nodes,
                cudaGetErrorString(err));
        return -1;
    }

    double timer1 = gettime();

    // Copy the data to the device-side buffers.
    // Bug fix: two messages contained a stray "#endif", and the col_d/incol_d
    // messages reported num_nodes although num_edges elements are copied.
    err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
                     cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n",
                num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
                     cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_edges,
                cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
                     cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy inrow_d (size:%d) => %s\n",
                num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
                     cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n",
                num_edges, cudaGetErrorString(err));
        return -1;
    }

    // Set up work dimensions: one thread per node.
    int block_size = 256;
    int num_blocks = (num_nodes + block_size - 1) / block_size;
    dim3 threads(block_size, 1, 1);
    dim3 grid(num_blocks, 1, 1);

    // Vertex objects are created with in-kernel new; enlarge the device heap.
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);

    double timer3 = gettime();

    ChiVertex<int, int> **vertex;
    GraphChiContext *context;
    err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
    if (err != cudaSuccess) {
        // Bug fix: report the allocated element count (num_nodes), not num_edges.
        fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_nodes,
                cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&context, sizeof(GraphChiContext));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n",
                (int)sizeof(GraphChiContext), cudaGetErrorString(err));
        return -1;
    }

    // Initialize the GraphChi context (single thread is sufficient).
    printf("Start initCtx\n");
    initContext<<<1, 1>>>(context, num_nodes, num_edges);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: initCtx failed (%s)\n",
                cudaGetErrorString(err));
        return -1;
    }

    // Construct the per-node vertex objects from the CSR arrays.
    printf("Start initObj\n");
    initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d,
                                  incol_d);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: initObject failed (%s)\n",
                cudaGetErrorString(err));
        return -1;
    }

    printf("Start initOutEdge\n");
    initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
                cudaGetErrorString(err));
        return -1;
    }

    // Run CC for a fixed number of iterations. TODO: convergence determination
    for (int i = 0; i < ITER; i++) {
        printf("Start ConnectedComponent\n");
        ConnectedComponent<<<grid, threads>>>(vertex, context, i);
        printf("Finish ConnectedComponent\n");
        cudaDeviceSynchronize();
        err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
                    cudaGetErrorString(err));
            return -1;
        }
    }
    cudaDeviceSynchronize();

    double timer4 = gettime();

    // Gather the per-vertex component ids into the flat cc_d buffer.
    printf("Start Copyback\n");
    copyBack<<<grid, threads>>>(vertex, context, cc_d);
    printf("End Copyback\n");
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
                cudaGetErrorString(err));
        return -1;
    }

    // Copy the rank buffer back
    err = cudaMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
                     cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
                cudaGetErrorString(err));
        return -1;
    }

    double timer2 = gettime();

    // Report timing characteristics
    printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
    printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);

#if 1
    // Print rank array
    print_vectorf(rank_array, num_nodes);
#endif

    // Free the host-side arrays
    free(rank_array);
    csr->freeArrays();
    free(csr);

    // Free the device buffers (vertex/context were previously leaked)
    cudaFree(row_d);
    cudaFree(col_d);
    cudaFree(inrow_d);
    cudaFree(incol_d);
    cudaFree(cc_d);
    cudaFree(vertex);
    cudaFree(context);
    return 0;
}
/**
 * Dump an integer vector to "result.out", one element per line.
 *
 * @param vector  array of num integers to write
 * @param num     number of elements in vector
 */
void print_vectorf(int *vector, int num) {
    FILE *fp = fopen("result.out", "w");
    if (!fp) {
        // Bug fix: previously the message named result.txt and execution
        // fell through to fprintf/fclose on a NULL stream.
        printf("ERROR: unable to open result.out\n");
        return;
    }
    for (int i = 0; i < num; i++) {
        fprintf(fp, "%d\n", vector[i]);
    }
    fclose(fp);
}
|
f321c8617d119e49cfa067fb78101a4734d24dbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <ctime>
#include <algorithm>
//#include "cutil_math.h"
//#include "cutil_inline.h"
#ifndef PI
#define PI 3.14159265358979323846
#endif
#define THREADSPERBLOCK 1024
// Round x up to the nearest power of two. A value that is already a power
// of two is returned unchanged; 0 maps to 0 via unsigned wrap-around.
unsigned int nextPow2(unsigned int x)
{
    // Smear the highest set bit of (x - 1) into every lower position, then
    // add one to land on the next power of two.
    unsigned int v = x - 1;
    for (unsigned int shift = 1; shift <= 16; shift <<= 1) {
        v |= v >> shift;
    }
    return v + 1;
}
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type.
// Every instantiation aliases the same dynamically sized __shared__ buffer
// that is supplied at kernel launch, so a given kernel should only ever use
// SharedMemory with one element type.
template<class T>
struct SharedMemory
{
    // Implicit conversion yielding a mutable pointer to the block's dynamic
    // shared-memory buffer.
    __device__ inline operator T *()
    {
        extern __shared__ T __smem[];
        return (T *)__smem;
    }

    // Const-qualified counterpart of the conversion above.
    __device__ inline operator const T *() const
    {
        extern __shared__ T __smem[];
        return (T *)__smem;
    }
};
// specialize for double to avoid unaligned memory
// access compile errors
//template<>
//struct SharedMemory<double>
//{
// __device__ inline operator double *()
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
//
// __device__ inline operator const double *() const
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
//};
// specialize for hipDoubleComplex to avoid unaligned memory
// access compile errors (uses a distinctly named extern buffer so the
// element alignment of the shared array is 16 bytes).
template<>
struct SharedMemory<hipDoubleComplex>
{
    // Mutable pointer to the block's dynamic shared-memory buffer.
    __device__ inline operator hipDoubleComplex *()
    {
        extern __shared__ hipDoubleComplex __smem_d[];
        return (hipDoubleComplex *)__smem_d;
    }

    // Const-qualified counterpart of the conversion above.
    __device__ inline operator const hipDoubleComplex *() const
    {
        extern __shared__ hipDoubleComplex __smem_d[];
        return (hipDoubleComplex *)__smem_d;
    }
};
// Unary functor for use with thrust transforms/reductions.
// NOTE(review): despite the name, this returns the complex magnitude |x|
// (as a real-valued hipDoubleComplex), not x squared — the squaring
// variant is the commented-out line below. Confirm which is intended.
struct squareCuDoubleComplex
{
    __host__ __device__
    hipDoubleComplex operator()(const hipDoubleComplex& x) const {
        return make_cuDoubleComplex(cuCabs(x),0.0);
        //return cuCmul(x,cuCmul(make_cuDoubleComplex(9.5367e-7,0),x));
    }
};
// Binary functor for thrust reductions: component-wise addition of two
// double-precision complex values via cuCadd.
struct addCuDoubleComplex
{
    __host__ __device__
    hipDoubleComplex operator()(const hipDoubleComplex& x, const hipDoubleComplex& y) const {
        return cuCadd(x,y);
    }
};
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use for the given reduction kernel
// For the kernels >= 3, we set threads / block to the minimum of maxThreads and
// n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel
// 6, we observe the maximum specified number of blocks, because each thread in
// that kernel can process a variable number of elements.
//
// whichKernel - reduction kernel variant being launched
// n           - number of elements to reduce
// maxBlocks   - upper bound on blocks (only enforced for kernel 6)
// maxThreads  - upper bound on threads per block
// blocks/threads - out-parameters receiving the launch configuration
////////////////////////////////////////////////////////////////////////////////
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
    hipError_t error;

    // Query the active device's limits so the launch configuration cannot
    // exceed them.
    hipDeviceProp_t prop;
    int device;
    (hipGetDevice(&device));

    // check device
    error = hipGetDeviceProperties(&prop, device);
    if (error != hipSuccess)
    {
        // Bug fix: the message previously named an unrelated function
        // ("cu_simConfPointRawSig_wrapper"); report the actual location.
        std::cout << "error in getNumBlocksAndThreads: hipGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
    }

    if (whichKernel < 3)
    {
        // One element per thread.
        threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
        blocks = (n + threads - 1) / threads;
    }
    else
    {
        // Two elements per thread (first add performed during load).
        threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
        blocks = (n + (threads * 2 - 1)) / (threads * 2);
    }

    // NOTE(review): this only warns; it does not adjust the configuration.
    if (threads*blocks > prop.maxGridSize[0] * prop.maxThreadsPerBlock)
    {
        printf("n is too large, please choose a smaller number!\n");
    }

    if (blocks > prop.maxGridSize[0])
    {
        printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
               blocks, prop.maxGridSize[0], threads*2, threads);

        blocks /= 2;
        threads *= 2;
    }

    if (whichKernel == 6)
    {
        blocks = MIN(maxBlocks, blocks);
    }
}
// Complex pointwise multiplication
//__global__ void complexPointwiseMul_kernel(hipDoubleComplex* a, const hipDoubleComplex* b, int size)
//{
// const int numThreads = blockDim.x * gridDim.x;
// const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
// for (int i = threadID; i < size; i += numThreads)
// a[i] = cuCmul(a[i], b[i]);
//}
// Complex pointwise multiplication
//__global__ void complexPointwiseMulandScale_kernel(hipDoubleComplex* a, const hipDoubleComplex* b, int size, double scale)
//{
// const int numThreads = blockDim.x * gridDim.x;
// const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
// for (int i = threadID; i < size; i += numThreads)
// {
// a[i] = cuCmul(a[i], b[i]);
// a[i].x = a[i].x*scale;
// a[i].y = a[i].y*scale;
// }
//}
// create Wavefront
// our field is a 1D-vector representation of a 2D-field in row major format. x is along rows
// Applies a defocus phase to the pupil field for one z-scan step.
// d_pField is a 1D row-major representation of an n x n 2D field (x along rows).
// Expected launch: 2D grid/block covering at least n x n threads; excess
// threads are masked out by the bounds check below.
__global__ void defocField_kernel(hipDoubleComplex* d_pField, ConfPoint_KernelParams *d_pParams)
{
	unsigned int xGes=(blockIdx.x*blockDim.x+threadIdx.x);
	unsigned int yGes=(blockIdx.y*blockDim.y+threadIdx.y);
	// Guard each coordinate separately. The previous linear-index test
	// (xGes+yGes*n < n*n) let threads with xGes >= n wrap into the next row
	// and apply the phase to another thread's element (a write race).
	if (xGes < d_pParams->n && yGes < d_pParams->n)
	{
		// grid coordinates centered on the optical axis
		double x=double(xGes)*(d_pParams->gridWidth/d_pParams->n)-d_pParams->gridWidth/2;
		double y=double(yGes)*(d_pParams->gridWidth/d_pParams->n)-d_pParams->gridWidth/2;
		// direction cosines of the ray through this pupil sample
		// (focal length = 160mm tubus length / magnification)
		double sigmaX=sin(atan(-x/(160/d_pParams->magnif)));
		double sigmaY=sin(atan(-y/(160/d_pParams->magnif)));
		double sigmaZ=(1-sigmaX*sigmaX-sigmaY*sigmaY);
		if (sigmaZ<0)
			sigmaZ=0; // evanescent region: no propagating z-component
		else
			sigmaZ=sqrt(sigmaZ);
		// apply the defocus phase for the current z scan step
		double phase=-2*PI/d_pParams->wvl*sigmaZ*d_pParams->scanStep.z;
		d_pField[xGes+d_pParams->n*yGes]=cuCmul(d_pField[xGes+d_pParams->n*yGes],make_cuDoubleComplex(cos(phase),sin(phase)));
	}
}
// Thin-element-approximation (TEA) interaction with a sinusoidal phase object:
// multiplies the field with exp(-i*k*A*cos(kN*x)) evaluated at lateral scan
// position jx. pObjField is 1D row-major n x n and NOT fftshifted; the x index
// is remapped accordingly before computing the object coordinate.
__global__ void objectInteractionTEA(hipDoubleComplex* pObjField, ConfPoint_KernelParams* pParams, ConfPoint_KernelObjectParams* pObjParams, unsigned int jx)
{
	__shared__ unsigned int s_n;
	__shared__ double s_k, s_NA, s_gridWidth, s_f, s_kN, s_A, s_delta1;
	// cache scalar parameters in shared memory (one writer per block row;
	// all writers store identical values, so there is no conflict)
	if (threadIdx.x==0)
	{
		s_k=2*PI/pParams->wvl;
		s_NA=pParams->NA; // loaded for symmetry with the other kernels; not used below
		s_gridWidth=pParams->gridWidth;
		s_f=160/pParams->magnif; // we assume a tubus length of 160mm here
		s_n=pParams->n;
		s_kN=pObjParams->kN;
		s_A=pObjParams->A;
		s_delta1=pParams->scanStep.x;
	}
	__syncthreads();
	unsigned int xGes=(blockIdx.x*blockDim.x+threadIdx.x);
	unsigned int yGes=(blockIdx.y*blockDim.y+threadIdx.y);
	unsigned int xGesObj;
	// Guard each coordinate separately. The previous linear-index test
	// (xGes+yGes*s_n < s_n*s_n) let threads with xGes >= s_n wrap into the
	// next row and modify another thread's element (a write race).
	if (xGes < s_n && yGes < s_n)
	{
		// sampling distance in the object grid ((2*PI/s_k) == wavelength)
		double delta2=(2*PI/s_k)*s_f/s_gridWidth;
		// we have to consider the fact that pObjField is not fftshifted...
		if (xGes < s_n/2)
			xGesObj=xGes+s_n/2;
		else
			xGesObj=xGes-s_n/2;
		// coordinate in the fftshifted object grid, offset by the scan position
		double x=jx*s_delta1+((-double(s_n)/2)+xGesObj)*delta2;
		double phase_obj=-s_k*s_A*cos(s_kN*x);
		// multiply field with object phase
		pObjField[xGes+yGes*s_n]=cuCmul(pObjField[xGes+yGes*s_n],make_cuDoubleComplex(cos(phase_obj),sin(phase_obj)));
	}
}
// Builds the initial (aberrated, apodised, pre-defocused) pupil field.
// pPupField is a 1D row-major n x n field. Expected launch: 2D grid/block
// covering at least n x n threads; excess threads are masked out below.
// NOTE(review): the Zernike coefficients are cached in s_aberrVec but the
// phase computation still reads pParams->pAberrVec directly, so the shared
// copy is currently unused; it is kept unchanged to preserve behavior.
__global__ void createField_kernel(hipDoubleComplex* pPupField, ConfPoint_KernelParams* pParams)
{
	__shared__ double s_aberrVec[16];
	__shared__ double s_gridWidth, s_magnif, s_k, s_NA, s_deltaZ, s_apodRadius, s_f;
	__shared__ unsigned int s_n;
	// cache the 16 aberration coefficients and scalar parameters in shared
	// memory (assumes blockDim.x >= 16 so all coefficients get loaded)
	if (threadIdx.x < 16)
	{
		s_aberrVec[threadIdx.x]=pParams->pAberrVec[threadIdx.x];
		if (threadIdx.x==0)
		{
			s_gridWidth=pParams->gridWidth;
			s_magnif=pParams->magnif;
			s_k=2*PI/pParams->wvl;
			s_NA=pParams->NA;
			s_n=pParams->n;
			// start the defocus at the lower end of the z scan range
			s_deltaZ=-pParams->scanStep.z*pParams->scanNumber.z/2;
			s_apodRadius=pParams->apodisationRadius;
			s_f=160/pParams->magnif;// we assume a tubus length of 160mm here to calculate the focal length of the objective lens
		}
	}
	__syncthreads();
	unsigned int xGes=(blockIdx.x*blockDim.x+threadIdx.x);
	unsigned int yGes=(blockIdx.y*blockDim.y+threadIdx.y);
	// Guard each coordinate separately. The previous linear-index test
	// (xGes+yGes*s_n < s_n*s_n) let threads with xGes >= s_n wrap into the
	// next row and write another thread's element.
	if (xGes < s_n && yGes < s_n)
	{
		// calc coordinates in pupil grid (centered on the optical axis)
		double x=double(xGes)*(s_gridWidth/s_n)-s_gridWidth/2;
		double y=double(yGes)*(s_gridWidth/s_n)-s_gridWidth/2;
		// calc width of pupil from the numerical aperture
		double wPup=tan(asin(s_NA))*2*s_f;
		double rho=sqrt(x*x+y*y)/(wPup/2); // normalized radial coordinate in pupil
		double apodRad=s_apodRadius/(wPup/2); // normalized apodisation radius
		double phi=atan2(y,x);
		// direction cosines for the initial defocus phase
		double sigmaX=sin(atan(-x/s_f));
		double sigmaY=sin(atan(-y/s_f));
		double sigmaZ=(1-sigmaX*sigmaX-sigmaY*sigmaY);
		if (sigmaZ>=0)
			sigmaZ=sqrt(sigmaZ);
		else
			sigmaZ=0; // evanescent region
		// amplitude apodisation from the projection angle
		double cosThetaZ=abs(s_f/sqrt(x*x+y*y+s_f*s_f));
		double apod=sqrt(cosThetaZ);
		if (rho<=1)
		{
			// calc defocus phase
			double phase_defoc=-s_k*sigmaZ*s_deltaZ;
			// Zernike aberration phase (terms 0..15: piston, tilts, defocus,
			// astigmatism, coma, spherical, trefoil, higher orders)
			double phase_aberr=s_k*(pParams->pAberrVec[0]
				+pParams->pAberrVec[1]*rho*cos(phi)
				+pParams->pAberrVec[2]*rho*sin(phi)
				+pParams->pAberrVec[3]*(2*rho*rho-1)
				+pParams->pAberrVec[4]*(rho*rho*cos(2*phi))
				+pParams->pAberrVec[5]*(rho*rho*sin(2*phi))
				+pParams->pAberrVec[6]*(3*pow(rho,3)-2*rho)*cos(phi)
				+pParams->pAberrVec[7]*(3*pow(rho,3)-2*rho)*sin(phi)
				+pParams->pAberrVec[8]*(6*pow(rho,4)-6*rho*rho+1)
				+pParams->pAberrVec[9]*pow(rho,3)*cos(3*phi)
				+pParams->pAberrVec[10]*pow(rho,3)*sin(3*phi)
				+pParams->pAberrVec[11]*(4*pow(rho,4)-3*pow(rho,2))*cos(2*phi)
				+pParams->pAberrVec[12]*(4*pow(rho,4)-3*pow(rho,2))*sin(2*phi)
				+pParams->pAberrVec[13]*(10*pow(rho,5)-12*pow(rho,3)+3*rho)*cos(phi)
				+pParams->pAberrVec[14]*(10*pow(rho,5)-12*pow(rho,3)+3*rho)*sin(phi)
				+pParams->pAberrVec[15]*(20*pow(rho,6)-30*pow(rho,4)+12*pow(rho,2)-1));
			// gaussian apodisation of the amplitude
			double ampl=exp(-rho*rho/(apodRad*apodRad));
			// create real and imaginary part of field with our amplitude and phase
			pPupField[xGes+yGes*s_n]=make_cuDoubleComplex(apod*ampl*cos(phase_aberr+phase_defoc),apod*ampl*sin(phase_aberr+phase_defoc));
		}
		else
			pPupField[xGes+yGes*s_n]=make_cuDoubleComplex(0, 0); // outside the pupil
	}
}
//__global__ void kernel(int *a, int*b)
//{
// int tx = threadIdx.x;
//
// switch(tx)
// {
// case 0:
// *a=*a+10;
// break;
// case 1:
// *b=*b+3;
// break;
// default:
// break;
// }
//}
// Scalar Richardson-Wolf focusing, pre-FFT step: scales the pupil field and
// computes the observation-plane coordinate axes x2/y2.
// Expected launch: gridDim = (dimx/TileWidth, dimy/TileHeight) with
// blockDim = (TileWidth, TileHeight); there is no bounds check, so dimx/dimy
// must be exact multiples of the tile sizes.
// NOTE(review): the field is indexed as [jx + jy*dimy]; for a row-major field
// this is only correct because the host wrapper enforces dimx == dimy -- confirm.
__global__ void scalar_RichardsonWolf_kernel(hipDoubleComplex* Uin_ptr, double* x1_ptr, double* y1_ptr, double* x2_ptr, double*y2_ptr, unsigned int dimx, unsigned int dimy, unsigned int TileWidth, unsigned int TileHeight, double wvl, double f, double Dz)
{
	unsigned int jx=blockIdx.x*TileWidth+threadIdx.x;
	unsigned int jy=blockIdx.y*TileHeight+threadIdx.y;
	// input-plane sampling distances (axes assumed equidistant)
	double dx1=abs(x1_ptr[1]-x1_ptr[0]);
	double dy1=abs(y1_ptr[1]-y1_ptr[0]);
	// observation-plane coordinates after the focusing FFT
	x2_ptr[jx]=(-1.0*dimx/2+jx)/(dimx*dx1)*wvl*f;
	y2_ptr[jy]=(-1.0*dimy/2+jy)/(dimy*dy1)*wvl*f;
	// direction cosines of the ray through this pupil sample
	double sigmaX=-x1_ptr[jx]/f;
	double sigmaY=-y1_ptr[jy]/f;
	double GktSqr=1-sigmaX*sigmaX-sigmaY*sigmaY;
	// free space propagation filters out evanescent waves...
	if (GktSqr<0)
	{
		GktSqr=0.0;
		Uin_ptr[jx+jy*dimy]=make_cuDoubleComplex(0.0,0.0);
	}
	else
	{
		// this looks kind of ugly because cudas complex<double> implementation doesn't have any operator notation...
		//Uin_ptr[jx+jy*dimy]=make_cuDoubleComplex(0.0,-1.0)*f*Uin_ptr[jx+jy*dimy]/pow(make_cuDoubleComplex(1-sigmaX*sigmaX-sigmaY*sigmaY,0.0),0.25)*make_cuDoubleComplex(cos(2*PI/wvl*Dz*sqrt(GktSqr)),sin(2*PI/wvl*Dz*sqrt(GktSqr)));
		// scale by -i * f / (1 - sigmaX^2 - sigmaY^2)^(1/4) ...
		hipDoubleComplex help=cuCmul(make_cuDoubleComplex(f/pow(1-sigmaX*sigmaX-sigmaY*sigmaY,0.25),0.0),Uin_ptr[jx+jy*dimy]);
		help=cuCmul(make_cuDoubleComplex(0.0,-1.0),help);
		// ... and apply the propagation phase exp(i * 2*pi/wvl * Dz * sqrt(GktSqr))
		Uin_ptr[jx+jy*dimy]=cuCmul(help,make_cuDoubleComplex(cos(2*PI/wvl*Dz*sqrt(GktSqr)),sin(2*PI/wvl*Dz*sqrt(GktSqr))));
	}
}
//****************************************************************/
// wrappers
//****************************************************************/
// Scalar Richardson-Wolf focusing: applies the GPU scaling kernel to the pupil
// field, FFTs it in place, and computes the observation-plane coordinate axes.
// Uin_ptr        : dimx x dimy complex field, overwritten with the focused field
// x1_ptr/y1_ptr  : input-plane coordinate axes (must be equidistantly sampled)
// x2/y2_ptrptr   : receive newly allocated host axes; the caller must free them
// Returns 0 for non-square grids or anisotropic sampling, 1 on success.
bool cu_scalarRichardsonWolf_wrapper(hipDoubleComplex* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double f, double Dz, double** x2_ptrptr, double** y2_ptrptr)
{
	// we handle only regularly squared grids here
	if (dimx!=dimy)
		return 0;
	// reject anisotropic sampling (kernel indexing relies on square, uniform grids)
	double dx1=abs(x1_ptr[0]-x1_ptr[1]);
	double dy1=abs(y1_ptr[0]-y1_ptr[1]);
	if (dx1!=dy1)
		return 0;
	// allocate device buffers and transfer the input data to the GPU
	double* x2_kernel_ptr;
	(hipMalloc((void**)&x2_kernel_ptr, sizeof(double)*dimx));
	double* y2_kernel_ptr;
	(hipMalloc((void**)&y2_kernel_ptr, sizeof(double)*dimy));
	double* x1_kernel_ptr;
	(hipMalloc((void**)&x1_kernel_ptr, sizeof(double)*dimx));
	(hipMemcpy(x1_kernel_ptr, x1_ptr, sizeof(double)*dimx, hipMemcpyHostToDevice));
	double* y1_kernel_ptr;
	(hipMalloc((void**)&y1_kernel_ptr, sizeof(double)*dimy));
	(hipMemcpy(y1_kernel_ptr, y1_ptr, sizeof(double)*dimy, hipMemcpyHostToDevice));
	hipDoubleComplex* Uin_kernel_ptr;
	(hipMalloc((void**)&Uin_kernel_ptr, sizeof(hipDoubleComplex)*dimx*dimy));
	(hipMemcpy(Uin_kernel_ptr, Uin_ptr, sizeof(hipDoubleComplex)*dimx*dimy, hipMemcpyHostToDevice));
	// launch configuration: 16x16 tiles. NOTE(review): assumes dimx/dimy are
	// multiples of 16 -- the kernel has no bounds check; confirm callers
	// guarantee this.
	unsigned int tileWidth=16;
	unsigned int tileHeight=16;
	dim3 dimBlock(tileWidth,tileHeight,1); // number of threads within each block in x,y,z (maximum of 512 in total. I.e. 512,1,1 or 8,16,2 or ...
	dim3 dimGrid(dimx/tileWidth,dimy/tileHeight,1); // number of blocks in x,y,z (maximum of 65535 for each dimension)
	hipLaunchKernelGGL(( scalar_RichardsonWolf_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, (hipDoubleComplex*)Uin_kernel_ptr, x1_kernel_ptr, y1_kernel_ptr, x2_kernel_ptr, y2_kernel_ptr, dimx, dimy, tileWidth, tileHeight, wvl, f, Dz);
	// allocate host memory for observation plane coordinates
	double *x2_l=(double*)calloc(dimx,sizeof(double));
	double *y2_l=(double*)calloc(dimy,sizeof(double));
	// transfer coordinates from GPU (blocking copies also synchronize with the kernel)
	(hipMemcpy(x2_l, x2_kernel_ptr, sizeof(double)*dimx, hipMemcpyDeviceToHost));
	(hipMemcpy(y2_l, y2_kernel_ptr, sizeof(double)*dimy, hipMemcpyDeviceToHost));
	// deallocate coordinate buffers on the GPU
	hipFree(x1_kernel_ptr);
	hipFree(x2_kernel_ptr);
	hipFree(y1_kernel_ptr);
	hipFree(y2_kernel_ptr);
	// plan and execute the focusing FFT in place
	hipfftHandle plan;
	(hipfftPlan2d(&plan,dimx, dimy, HIPFFT_Z2Z));
	(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)Uin_kernel_ptr, (hipfftDoubleComplex *)Uin_kernel_ptr, HIPFFT_FORWARD));
	// transfer optical field from GPU
	(hipMemcpy(Uin_ptr, Uin_kernel_ptr, sizeof(hipDoubleComplex)*dimy*dimx, hipMemcpyDeviceToHost));
	// deallocate optical field on GPU
	hipFree(Uin_kernel_ptr);
	// destroy fft plan
	hipfftDestroy(plan);
	// return pointers to the new coordinate axes
	*x2_ptrptr=x2_l;
	*y2_ptrptr=y2_l;
	return 1;
}
//
//void kernel_wrapper(int *a, int* b)
//{
// int *d_1, *d_2;
// dim3 threads( 2, 1);
// dim3 blocks( 1, 1);
//
// hipMalloc( (void **)&d_1, sizeof(int) );
// hipMalloc( (void **)&d_2, sizeof(int) );
//
// hipMemcpy( d_1, a, sizeof(int), hipMemcpyHostToDevice );
// hipMemcpy( d_2, b, sizeof(int), hipMemcpyHostToDevice );
//
// kernel<<< blocks, threads >>>( a, b );
//
// hipMemcpy( a, d_1, sizeof(int), hipMemcpyDeviceToHost );
// hipMemcpy( b, d_2, sizeof(int), hipMemcpyDeviceToHost );
//
// hipFree(d_1);
// hipFree(d_2);
//}
//
// Scaled angular-spectrum propagation. Currently only the forward FFT of the
// field is implemented (in place on Uin_ptr); the remaining parameters are
// accepted for interface compatibility but not yet used.
bool cu_angularSpectrumScaled_wrapper(hipDoubleComplex* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double dx2, double** x2_ptrptr, double** y2_ptrptr)
{
	// Allocate device memory for the field
	hipDoubleComplex* Uin_kernel_ptr;
	(hipMalloc((void**)&Uin_kernel_ptr, sizeof(hipDoubleComplex)*dimx*dimy));
	// Copy host memory to device
	(hipMemcpy(Uin_kernel_ptr, Uin_ptr, sizeof(hipDoubleComplex)*dimx*dimy,
			   hipMemcpyHostToDevice));
	// FFT plan
	hipfftHandle plan;
	(hipfftPlan2d(&plan,dimx, dimy, HIPFFT_Z2Z));
	// Transform the signal in place
	printf("Transforming signal hipfftExecC2C\n");
	(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)Uin_kernel_ptr, (hipfftDoubleComplex *)Uin_kernel_ptr, HIPFFT_FORWARD));
	// copy device memory back to host
	(hipMemcpy(Uin_ptr, Uin_kernel_ptr, sizeof(hipDoubleComplex)*dimx*dimy, hipMemcpyDeviceToHost));
	// release the FFT plan and the device buffer
	// (both were previously leaked on every call)
	hipfftDestroy(plan);
	hipFree(Uin_kernel_ptr);
	return true;
}
//
//bool cu_angularSpectrum_ABCD(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double dx2, double* ABCD, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_fresnel_two_step_1D(complex<double>* Uin_ptr, unsigned int dimx, double wvl, double* x1_ptr, double dx2, double Dz, double** x2_ptrptr )
//{
// return true;
//}
//
//bool cu_fresnel_two_step(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double dx2, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_fresnel_one_step_1D(complex<double>* Uin_ptr, unsigned int dimx, double wvl, double* x1_ptr, double Dz, double** x2_ptrptr )
//{
// return true;
//}
//
//bool cu_fresnel_one_step(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_scalar_RichardsonWolf(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double f, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// // Allocate device memory
// complex<double>* Uin_kernel_ptr;
// cutilSafeCall(hipMalloc((void**)&Uin_kernel_ptr, sizeof(complex<double>)*dimx*dimy));
//
// // Copy host memory to device
// cutilSafeCall(hipMemcpy(Uin_kernel_ptr, Uin_ptr, sizeof(complex<double>)*dimx*dimy, hipMemcpyHostToDevice));
//
// // Allocate device memory
// double* x1_kernel_ptr;
// cutilSafeCall(hipMalloc((void**)&x1_kernel_ptr, sizeof(double)*dimx));
//
// // Copy host memory to device
// cutilSafeCall(hipMemcpy(x1_kernel_ptr, x1_ptr, sizeof(double)*dimx,
// hipMemcpyHostToDevice));
//
// // Allocate device memory
// double* y1_kernel_ptr;
// cutilSafeCall(hipMalloc((void**)&y1_kernel_ptr, sizeof(double)*dimy));
//
// // Copy host memory to device
// cutilSafeCall(hipMemcpy(y1_kernel_ptr, y1_ptr, sizeof(double)*dimy,
// hipMemcpyHostToDevice));
//
// // allocate host memory
// *x2_ptrptr=(double*)calloc(dimx,sizeof(double));
// *y2_ptrptr=(double*)calloc(dimy,sizeof(double));
//
// // Allocate device memory
// double* x2_kernel_ptr;
// cutilSafeCall(hipMalloc((void**)&x2_kernel_ptr, sizeof(double)*dimx));
//
// // Allocate device memory
// double* y2_kernel_ptr;
// cutilSafeCall(hipMalloc((void**)&y2_kernel_ptr, sizeof(double)*dimy));
// // do the scaling
// cu_scalar_RichardsonWolf_kernel<<<32,512>>>(reinterpret_cast<hipfftDoubleComplex*>(Uin_kernel_ptr), dimx, dimy, wvl, x1_kernel_ptr, y1_kernel_ptr, f, Dz, x2_kernel_ptr, y2_kernel_ptr);
//
// // do the fft
// // CUFFT plan
// hipfftHandle plan;
// cufftSafeCall(hipfftPlan2d(&plan,dimx, dimy, HIPFFT_Z2Z));
//
// // execution
// cufftSafeCall(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)Uin_kernel_ptr, (hipfftDoubleComplex *)Uin_kernel_ptr, HIPFFT_FORWARD));
//
// // do the ffthift in a kernel....
//
// // copy device memory back to host
// cutilSafeCall(hipMemcpy(Uin_ptr, Uin_kernel_ptr, sizeof(complex<double>)*dimx*dimy, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(*x2_ptrptr, x2_kernel_ptr, sizeof(double)*dimx, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(*y2_ptrptr, y2_kernel_ptr, sizeof(double)*dimy, hipMemcpyDeviceToHost));
//
// return true;
//}
//
//bool cu_fraunhofer(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_fraunhofer_1D(complex<double>* Uin_ptr, unsigned int dimx, double wvl, double* x1_ptr, double Dz, double** x2_ptrptr)
//{
// return true;
//}
//
//bool cu_fftshift(complex<double>* in, unsigned int dimx, unsigned int dimy)
//{
// return true;
//}
//
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
/*
    First reduction stage of the overlap integral: each thread squares its
    field element(s) via cuCmul(x,x) while loading, then the block sums the
    partial results in shared memory. Uses n/2 threads -- the first add
    happens during the global load. Requires a power-of-two blockDim.x and
    dynamic shared memory of blockDim.x * sizeof(hipDoubleComplex)
    (2x that when blockDim.x <= 32).
*/
__global__ void reduce_kernel3_overlap(hipDoubleComplex *g_idata, hipDoubleComplex *g_odata, unsigned int n)
{
	hipDoubleComplex *sdata = SharedMemory<hipDoubleComplex>();
	// perform first level of reduction,
	// reading from global memory, writing to shared memory
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
	// load and square (complex product of the element with itself)
	hipDoubleComplex mySum = (i < n) ? cuCmul(g_idata[i],g_idata[i]) : make_cuDoubleComplex(0.0,0.0);
	if (i + blockDim.x < n)
		mySum = cuCadd(mySum, cuCmul(g_idata[i+blockDim.x],g_idata[i+blockDim.x]));
	sdata[tid] = mySum;
	__syncthreads();
	// do reduction in shared mem (tree reduction, power-of-two blockDim assumed)
	for (unsigned int s=blockDim.x/2; s>0; s>>=1)
	{
		if (tid < s)
		{
			sdata[tid] = mySum = cuCadd(mySum, sdata[tid + s]);
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
/*
    Final reduction stage: sums the remaining partial sums and stores
    |sum|^2 into the raw-signal array at position *g_index, then increments
    the index on the device. Must be launched with exactly ONE block -- the
    unguarded read-modify-write of *g_index is only safe with a single writer.
    Uses n/2 threads (first add during the global load); requires a
    power-of-two blockDim.x and the same shared-memory sizing as the other
    reduce_kernel3 variants.
*/
__global__ void reduce_kernel3_final(hipDoubleComplex *g_idata, double *g_odata, unsigned int *g_index, unsigned int n)
{
	hipDoubleComplex *sdata = SharedMemory<hipDoubleComplex>();
	// perform first level of reduction,
	// reading from global memory, writing to shared memory
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
	hipDoubleComplex mySum = (i < n) ? g_idata[i] : make_cuDoubleComplex(0.0,0.0);
	if (i + blockDim.x < n)
		mySum = cuCadd(mySum, g_idata[i+blockDim.x]);
	sdata[tid] = mySum;
	__syncthreads();
	// do reduction in shared mem
	for (unsigned int s=blockDim.x/2; s>0; s>>=1)
	{
		if (tid < s)
		{
			sdata[tid] = mySum = cuCadd(mySum, sdata[tid + s]);
		}
		__syncthreads();
	}
	// thread 0 stores the squared magnitude of the total into the raw signal
	// and advances the device-side write index
	if (tid == 0)
	{
		g_odata[(*g_index)] = cuCabs(sdata[0])*cuCabs(sdata[0]);
		*g_index=*g_index+1; // increment index
	}
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
/*
    Intermediate reduction stage: plain block-wise sum of complex partial
    sums (no squaring). Uses n/2 threads -- the first add happens during the
    global load. Requires a power-of-two blockDim.x and dynamic shared memory
    of blockDim.x * sizeof(hipDoubleComplex) (2x that when blockDim.x <= 32).
*/
__global__ void reduce_kernel3(hipDoubleComplex *g_idata, hipDoubleComplex *g_odata, unsigned int n)
{
	hipDoubleComplex *sdata = SharedMemory<hipDoubleComplex>();
	// perform first level of reduction,
	// reading from global memory, writing to shared memory
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
	hipDoubleComplex mySum = (i < n) ? g_idata[i] : make_cuDoubleComplex(0.0,0.0);
	if (i + blockDim.x < n)
		mySum = cuCadd(mySum, g_idata[i+blockDim.x]);
	sdata[tid] = mySum;
	__syncthreads();
	// do reduction in shared mem (tree reduction)
	for (unsigned int s=blockDim.x/2; s>0; s>>=1)
	{
		if (tid < s)
		{
			sdata[tid] = mySum = cuCadd(mySum, sdata[tid + s]);
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
// Generic multi-element-per-thread reduction (CUDA SDK "reduce6" pattern):
// grid-stride accumulation followed by a shared-memory tree reduction with a
// fully unrolled, warp-synchronous tail. blockSize must match blockDim.x.
// NOTE(review): the volatile warp-synchronous tail (tid < 32, no explicit
// warp sync) assumes implicit warp lockstep; on architectures with
// independent thread scheduling this requires __syncwarp()/_sync intrinsics
// -- confirm the target architecture.
template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n)
{
	T *sdata = SharedMemory<T>();
	// perform first level of reduction,
	// reading from global memory, writing to shared memory
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
	unsigned int gridSize = blockSize*2*gridDim.x;
	T mySum = 0;
	// we reduce multiple elements per thread. The number is determined by the
	// number of active thread blocks (via gridDim). More blocks will result
	// in a larger gridSize and therefore fewer elements per thread
	while (i < n)
	{
		mySum += g_idata[i];
		// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
		if (nIsPow2 || i + blockSize < n)
			mySum += g_idata[i+blockSize];
		i += gridSize;
	}
	// each thread puts its local sum into shared memory
	sdata[tid] = mySum;
	__syncthreads();
	// do reduction in shared mem (unrolled for the known blockSize)
	if (blockSize >= 512)
	{
		if (tid < 256)
		{
			sdata[tid] = mySum = mySum + sdata[tid + 256];
		}
		__syncthreads();
	}
	if (blockSize >= 256)
	{
		if (tid < 128)
		{
			sdata[tid] = mySum = mySum + sdata[tid + 128];
		}
		__syncthreads();
	}
	if (blockSize >= 128)
	{
		if (tid < 64)
		{
			sdata[tid] = mySum = mySum + sdata[tid + 64];
		}
		__syncthreads();
	}
	if (tid < 32)
	{
		// now that we are using warp-synchronous programming (below)
		// we need to declare our shared memory volatile so that the compiler
		// doesn't reorder stores to it and induce incorrect behavior.
		volatile T *smem = sdata;
		if (blockSize >= 64)
		{
			smem[tid] = mySum = mySum + smem[tid + 32];
		}
		if (blockSize >= 32)
		{
			smem[tid] = mySum = mySum + smem[tid + 16];
		}
		if (blockSize >= 16)
		{
			smem[tid] = mySum = mySum + smem[tid + 8];
		}
		if (blockSize >= 8)
		{
			smem[tid] = mySum = mySum + smem[tid + 4];
		}
		if (blockSize >= 4)
		{
			smem[tid] = mySum = mySum + smem[tid + 2];
		}
		if (blockSize >= 2)
		{
			smem[tid] = mySum = mySum + smem[tid + 1];
		}
	}
	// write result for this block to global mem
	if (tid == 0)
		g_odata[blockIdx.x] = sdata[0];
}
////////////////////////////////////////////////////////////////////////////////
// This function performs a reduction of the input data multiple times and
// measures the average reduction time.
////////////////////////////////////////////////////////////////////////////////
// Computes the overlap integral sum(field[i]^2) over n elements on the GPU.
// The first pass squares while reducing (reduce_kernel3_overlap); subsequent
// passes reduce the per-block partial sums until a single block remains,
// whose result is appended to d_rawSig at position *d_index (the index is
// incremented on the device by reduce_kernel3_final).
// d_odata must hold at least numBlocks elements of scratch space.
void reduceOverlap(int n,
				   int numThreads,
				   int numBlocks,
				   int maxThreads,
				   int maxBlocks,
				   hipDoubleComplex *d_idata,
				   hipDoubleComplex *d_odata,
				   double *d_rawSig,
				   double *h_rawSig,
				   unsigned int *d_index)
{
	dim3 dimBlock(numThreads,1,1);
	dim3 dimGrid(numBlocks, 1, 1);
	// when there is only one warp per block, we need to allocate two warps
	// worth of shared memory so that we don't index shared memory out of bounds
	int smemSize = (numThreads <= 32) ? 2 * numThreads * sizeof(hipDoubleComplex) : numThreads * sizeof(hipDoubleComplex);
	// first pass: square the field values while reducing
	hipLaunchKernelGGL(( reduce_kernel3_overlap), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, n);
	// debug copy of the per-block partial sums (not used further)
	hipDoubleComplex *h_pOutData;
	h_pOutData=(hipDoubleComplex*)malloc(numBlocks*sizeof(hipDoubleComplex));
	hipMemcpy(h_pOutData, d_odata, numBlocks*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost);
	free(h_pOutData); // was previously leaked
	// sum partial block sums on GPU
	int s=numBlocks;
	while (s > 1)
	{
		int l_threads = 0, l_blocks = 0;
		getNumBlocksAndThreads(3, s, maxBlocks, maxThreads, l_blocks, l_threads);
		dim3 l_dimBlock(l_threads,1,1);
		dim3 l_dimGrid(l_blocks,1,1);
		// Shared memory must be sized for THIS launch's block size. The
		// original used the outer numThreads here, which under-allocates
		// whenever l_threads > numThreads and makes the kernel read shared
		// memory out of bounds.
		int l_smemSize = (l_threads <= 32) ? 2 * l_threads * sizeof(hipDoubleComplex) : l_threads * sizeof(hipDoubleComplex);
		// the last reduction executes in only one block. Its result is the final
		// overlap integral and is stored into the raw-signal buffer on the device.
		if (l_blocks==1)
			hipLaunchKernelGGL(( reduce_kernel3_final), dim3(l_dimGrid), dim3(l_dimBlock), l_smemSize, 0, d_odata, d_rawSig, d_index, s);
		else
			// execute pure reduction kernel
			hipLaunchKernelGGL(( reduce_kernel3), dim3(l_dimGrid), dim3(l_dimBlock), l_smemSize, 0, d_odata, d_odata, s);
		// update number of active blocks
		s = (s + (l_threads*2-1)) / (l_threads*2);
	}
	unsigned int index;
	hipMemcpy(&index, d_index, sizeof(unsigned int), hipMemcpyDeviceToHost);
	// copy final sum from device to host (blocking copy synchronizes with the kernels)
	// NOTE(review): copies only the first raw-signal sample -- confirm intended.
	hipMemcpy(h_rawSig, d_rawSig, sizeof(double), hipMemcpyDeviceToHost);
}
///*
// This version adds multiple elements per thread sequentially. This reduces the overall
// cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
// (Brent's Theorem optimization)
//
// Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
// In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
// If blockSize > 32, allocate blockSize*sizeof(T) bytes.
//*/
//template <unsigned int blockSize, bool nIsPow2> __global__ void calcOverlap(hipDoubleComplex *g_idata, hipDoubleComplex *g_odata, unsigned int n)
//{
// hipDoubleComplex *sdata = SharedMemory<hipDoubleComplex>();
//
// // perform first level of reduction,
// // reading from global memory, writing to shared memory
// unsigned int tid = threadIdx.x;
// unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
// unsigned int gridSize = blockSize*2*gridDim.x;
//
// hipDoubleComplex mySum = make_cuDoubleComplex(0.0,0.0);
//
// // we reduce multiple elements per thread. The number is determined by the
// // number of active thread blocks (via gridDim). More blocks will result
// // in a larger gridSize and therefore fewer elements per thread
// while (i < n)
// {
// mySum = cuCadd(mySum, cuCmul(g_idata[i],g_idata[i]));
//
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 || i + blockSize < n)
// mySum = cuCadd(mySum, cuCmul(g_idata[i+blockSize],g_idata[i+blockSize]));
//
// i += gridSize;
// }
//
// // each thread puts its local sum into shared memory
// sdata[tid] = mySum;
// __syncthreads();
//
//
// // do reduction in shared mem
// if (blockSize >= 512)
// {
// if (tid < 256)
// {
// sdata[tid] = mySum = cuCadd(mySum,sdata[tid + 256]);
// }
//
// __syncthreads();
// }
//
// if (blockSize >= 256)
// {
// if (tid < 128)
// {
// sdata[tid] = mySum = cuCadd(mySum, sdata[tid + 128]);
// }
//
// __syncthreads();
// }
//
// if (blockSize >= 128)
// {
// if (tid < 64)
// {
// sdata[tid] = mySum = cuCadd(mySum, sdata[tid + 64]);
// }
//
// __syncthreads();
// }
//
// if (tid < 32)
// {
// // now that we are using warp-synchronous programming (below)
// // we need to declare our shared memory volatile so that the compiler
// // doesn't reorder stores to it and induce incorrect behavior.
// //volatile hipDoubleComplex *smem = sdata;
// volatile hipDoubleComplex *smem = sdata;
//
// if (blockSize >= 64)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 32]);
// }
//
// if (blockSize >= 32)
// {
// mySum=cuCadd(mySum, smem[tid + 16]);
// smem[tid] = mySum;// = cuCadd(mySum, smem[tid + 16]);
// }
//
// if (blockSize >= 16)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 8]);
// }
//
// if (blockSize >= 8)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 4]);
// }
//
// if (blockSize >= 4)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 2]);
// }
//
// if (blockSize >= 2)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 1]);
// }
// }
//
// // write result for this block to global mem
// //if (tid == 0)
// // g_odata[blockIdx.x] = sdata[0];
//}
// Self-test for the GPU reduction path: fills an n-element field with the
// constant (2,0), runs the multi-pass reduction via reduceOverlap(), and
// returns 1.0 on success or 0 on any HIP error.
double cu_testReduce_wrapper()
{
	hipDeviceProp_t deviceProp;
	hipError_t error;
	// check device
	error = hipGetDeviceProperties(&deviceProp, 0);
	if (error != hipSuccess)
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	int n=1024*1024;
	// host-side test data: constant complex value so the expected sum is known
	hipDoubleComplex *inData;
	inData=(hipDoubleComplex*)malloc(n*sizeof(hipDoubleComplex));
	for (int i=0; i<n; ++i)
	{
		inData[i]=make_cuDoubleComplex(2.0,0.0);
	}
	hipDoubleComplex *d_inData;
	if (hipSuccess != hipMalloc((void**)&d_inData, n*sizeof(hipDoubleComplex)))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	// upload the test data (the original never copied it, so the reduction
	// ran on uninitialized device memory)
	if (hipSuccess != hipMemcpy(d_inData, inData, n*sizeof(hipDoubleComplex), hipMemcpyHostToDevice))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	// raw signal (device accumulator for the final result)
	double *d_rawSig;
	double rawSig=0.0;
	if (hipSuccess != hipMalloc((void**)&d_rawSig, sizeof(double)))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	if (hipSuccess != hipMemcpy(d_rawSig, &rawSig, sizeof(double), hipMemcpyHostToDevice))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	// index of raw signal (incremented on the device by the final kernel)
	unsigned int *d_index;
	unsigned int index=0;
	if (hipSuccess != hipMalloc((void**)&d_index, sizeof(unsigned int)))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	if (hipSuccess != hipMemcpy(d_index, &index, sizeof(unsigned int), hipMemcpyHostToDevice))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	// pick a launch configuration for the first reduction pass
	int blocksReduction;
	int threadsReduction;
	int maxBlocks=deviceProp.maxGridSize[0];
	int maxThreads=deviceProp.maxThreadsPerBlock;
	getNumBlocksAndThreads(3, n, maxBlocks, maxThreads, blocksReduction, threadsReduction);
	// scratch space for the per-block partial sums
	hipDoubleComplex *d_outData;
	if (hipSuccess != hipMalloc((void**)&d_outData, blocksReduction*sizeof(hipDoubleComplex)))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	double outData=0;
	// do the summation
	reduceOverlap(n, threadsReduction, blocksReduction, maxThreads, maxBlocks, d_inData, d_outData, d_rawSig, &outData, d_index);
	// release all device buffers (d_rawSig and d_index were previously leaked)
	hipFree(d_outData);
	hipFree(d_rawSig);
	hipFree(d_index);
	hipFree(d_inData);
	free(inData); // allocated with malloc(): delete was undefined behavior
	return 1.0;//outData;
}
// Accumulates sum(field[i]*field[i]) over N elements into out[*outIdx], using
// a grid-stride accumulation loop, a shared-memory tree reduction, and a
// device-side lock to serialize the per-block additions into global memory.
// Requires blockDim.x == THREADSPERBLOCK (size of the shared cache) and a
// power-of-two block size.
__global__ void innerProduct(Lock lock, hipDoubleComplex *field, hipDoubleComplex *out, int *outIdx, int N)
{
	__shared__ hipDoubleComplex cache[THREADSPERBLOCK];
	int tid = threadIdx.x + blockIdx.x * blockDim.x;
	int cacheIdx=threadIdx.x;
	hipDoubleComplex temp=make_cuDoubleComplex(0.0, 0.0);
	// square each element (grid-stride loop: works for any grid size)
	while (tid < N)
	{
		temp=cuCadd(temp, cuCmul(field[tid], field[tid]));
		tid += blockDim.x*gridDim.x;
	}
	// set the cache values
	cache[cacheIdx]=temp;
	// synchronize threads in this block
	__syncthreads();
	// for reductions, threadsPerBlock must be a power of two because of the following code
	int i=blockDim.x/2;
	while (i != 0)
	{
		if (cacheIdx < i)
			cache[cacheIdx] = cuCadd(cache[cacheIdx], cache[cacheIdx+i]);
		__syncthreads();
		i = i/2;
	}
	// one thread per block adds the block's partial sum under the global lock
	if (cacheIdx==0)
	{
		lock.lock();
		out[*outIdx]=cuCadd(out[*outIdx], cache[0]);
		lock.unlock();
	}
}
// Single-thread helper kernel: advances the raw-signal write index by one.
// Intended to be launched with a single thread (no synchronization is done).
__global__ void incrementIdx(int *idx)
{
	++(*idx);
}
// Reduces sum(field[i]*field[i]) over N elements into *out, using a
// grid-stride accumulation loop, a shared-memory tree reduction, and a
// device-side lock for the final per-block addition into global memory.
// Requires blockDim.x == THREADSPERBLOCK (size of the shared cache) and a
// power-of-two block size.
__global__ void myReduce(Lock lock, hipDoubleComplex *field, hipDoubleComplex *out, int N)
{
	__shared__ hipDoubleComplex cache[THREADSPERBLOCK];
	int tid = threadIdx.x + blockIdx.x * blockDim.x;
	int cacheIdx=threadIdx.x;
	hipDoubleComplex temp=make_cuDoubleComplex(0.0, 0.0);
	// square each element (grid-stride loop: works for any grid size)
	while (tid < N)
	{
		temp=cuCadd(temp, cuCmul(field[tid], field[tid]));
		tid += blockDim.x*gridDim.x;
	}
	// set the cache values
	cache[cacheIdx]=temp;
	// synchronize threads in this block
	__syncthreads();
	// for reductions, threadsPerBlock must be a power of two because of the following code
	int i=blockDim.x/2;
	while (i != 0)
	{
		if (cacheIdx < i)
			cache[cacheIdx] = cuCadd(cache[cacheIdx], cache[cacheIdx+i]);
		__syncthreads();
		i = i/2;
	}
	// one thread per block adds the block's partial sum under the global lock
	if (cacheIdx==0)
	{
		lock.lock();
		*out=cuCadd(*out, cache[0]);
		lock.unlock();
	}
}
// Test wrapper for the lock-based GPU reduction: fills an n*n field with a
// constant complex value, reduces sum(field^2) with myReduce, and stores the
// magnitude of the result in a newly allocated 1-element raw-signal buffer
// (*ppRawSig, caller frees). Returns 0 on any HIP error, true on success.
bool cu_simConfPointRawSig_wrapperTest(double** ppRawSig, ConfPoint_KernelParams params)
{
	size_t N = params.n*params.n;
	hipDeviceProp_t deviceProp;
	hipError_t error;
	// check device
	error = hipGetDeviceProperties(&deviceProp, 0);
	if (error != hipSuccess)
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	// host-side test data: constant complex value
	hipDoubleComplex * l_pHostData=(hipDoubleComplex*)malloc(N*sizeof(hipDoubleComplex));
	for (size_t i=0;i<N;i++)
	{
		l_pHostData[i]=make_cuDoubleComplex(10000.0,5000.0);
	}
	// allocate device memory and transfer the test data
	hipDoubleComplex * l_pDeviceData;
	hipMalloc((void **) &l_pDeviceData, N * sizeof(hipDoubleComplex));
	error=hipMemcpy(l_pDeviceData, l_pHostData,N*sizeof(hipDoubleComplex),hipMemcpyHostToDevice);
	if (error != hipSuccess)
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned error code " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	*ppRawSig=(double*)calloc(1,sizeof(double));
	// device-side accumulator, initialized to zero
	hipDoubleComplex l_hostOutData=make_cuDoubleComplex(0.0,0.0);
	hipDoubleComplex *l_pDeviceOutData;
	if (hipSuccess != hipMalloc((void**)&l_pDeviceOutData, sizeof(hipDoubleComplex)))
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	if (hipSuccess != hipMemcpy(l_pDeviceOutData, &l_hostOutData, sizeof(hipDoubleComplex), hipMemcpyHostToDevice) )
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	Lock lock;
	int blocksPerGrid=(N+THREADSPERBLOCK-1)/THREADSPERBLOCK;
	// Launch with THREADSPERBLOCK threads per block: myReduce's shared cache
	// holds exactly THREADSPERBLOCK elements, so the previous hard-coded 1024
	// threads wrote shared memory out of bounds whenever THREADSPERBLOCK != 1024.
	hipLaunchKernelGGL(( myReduce), dim3(blocksPerGrid), dim3(THREADSPERBLOCK), 0, 0, lock, l_pDeviceData, l_pDeviceOutData, N);
	// copy the result back from the GPU (blocking copy synchronizes with the kernel)
	if (hipSuccess != hipMemcpy(&l_hostOutData, l_pDeviceOutData, sizeof(hipDoubleComplex), hipMemcpyDeviceToHost) )
	{
		std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
		return 0;
	}
	(*ppRawSig)[0]=cuCabs(l_hostOutData);
	// free device and host memory
	hipFree(l_pDeviceData);
	hipFree(l_pDeviceOutData);
	free(l_pHostData); // allocated with malloc(): delete was undefined behavior
	return true;
}
// Simulates the confocal point-sensor signal over an x/y scan: for every scan
// position the pupil field is built, stepped through z via repeated defocus,
// FFT'd to the object plane, multiplied with the object (thin element
// approximation), and reduced to one complex raw-signal sample per z step; the
// centre of gravity of |raw|^2 over z yields the height value written to
// (*ppSensorSig). Writes intermediate fields to hard-coded E:\ paths as debug
// dumps. Returns true on success.
// NOTE(review): several resources leak on the early-return error paths and in
// the final cleanup; the debug-dump fopen failures return 1 (== true), i.e.
// they report success to the caller. See FIXME/NOTE comments below.
bool cu_simConfPointSensorSig_wrapper(double** ppSensorSig, ConfPoint_KernelParams params, ConfPoint_KernelObjectParams paramsObject)
{
hipDeviceProp_t deviceProp;
hipError_t error;
// timing stuff
clock_t start, end, startGes, endGes;
double msecs_DataTransfer=0;
double msecs_ObjectInteraction=0;
double msecs_Defoc=0;
double msecs_FFT=0;
double msecs_Reduce=0;
double msecs_createField=0;
double msecs=0;
// start timing
start=clock();
startGes=clock();
// check device
error = hipGetDeviceProperties(&deviceProp, 0);
if (error != hipSuccess)
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
return false;
}
// allocate host memory for raw signal (one height value per x/y scan position)
(*ppSensorSig)=(double*)calloc(params.scanNumber.x*params.scanNumber.y,sizeof(double));
// allocate host memory for complex raw signal
hipDoubleComplex* l_pRawSig=(hipDoubleComplex*)calloc(params.scanNumber.z,sizeof(hipDoubleComplex));
hipDoubleComplex* l_pRawSigInit=(hipDoubleComplex*)calloc(params.scanNumber.z,sizeof(hipDoubleComplex));
double* l_pAbsVal=(double*)calloc(params.scanNumber.z,sizeof(double));
// allocate device memory for raw signal
hipDoubleComplex* d_pRawSig;
if (hipSuccess != hipMalloc((void**)&d_pRawSig, params.scanNumber.z*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// transfer rawSig to device
//if (hipSuccess != hipMemcpy(d_pRawSig, l_pRawSig, params.scanNumber.z*sizeof(hipDoubleComplex), hipMemcpyHostToDevice))
//{
//	std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
//	return false;
//}
// use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// allocate device memory for params
ConfPoint_KernelParams* d_pParams;
if (hipSuccess != hipMalloc((void**)&d_pParams, sizeof(ConfPoint_KernelParams)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// transfer params to device
// FIXME(review): "¶ms" below is encoding damage for "&params" (the
// HTML entity &para;); same for "¶msObject" further down -- restore
// before compiling.
if (hipSuccess != hipMemcpy(d_pParams, ¶ms, sizeof(ConfPoint_KernelParams), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// allocate device memory for object params
ConfPoint_KernelObjectParams* d_pObjParams;
if (hipSuccess != hipMalloc((void**)&d_pObjParams, sizeof(ConfPoint_KernelObjectParams)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// transfer object params to device
if (hipSuccess != hipMemcpy(d_pObjParams, ¶msObject, sizeof(ConfPoint_KernelObjectParams), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// allocate device memory for pupil field
hipDoubleComplex* d_pPupField;
if (hipSuccess != hipMalloc((void**)&d_pPupField, params.n*params.n*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// allocate device memory for object field
hipDoubleComplex* d_pObjField;
if (hipSuccess != hipMalloc((void**)&d_pObjField, params.n*params.n*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// calc dimensions of kernel launch when have one kernel per element in the pupil field
dim3 dimBlock(block_size,block_size,1); // number of threads within each block in x,y,z (maximum of 512 or 1024 in total. I.e. 512,1,1 or 8,16,2 or ...
unsigned int mod= params.n % block_size;
unsigned int dimGridx;
// NOTE(review): when mod != 0 the divisor below is (1*block_size+1), i.e.
// n/(block_size+1); the intended expression is presumably
// params.n/block_size + 1 (cf. cu_simConfPointRawSig_wrapper1).
if (mod==0)
dimGridx=params.n/(1*block_size);
else
dimGridx=params.n/(1*block_size+1);
unsigned int dimGridy;
if (mod==0)
dimGridy=params.n/(1*block_size);
else
dimGridy=params.n/(1*block_size+1);
// NOTE(review): "unsigned int(1)" is not a valid functional cast in standard
// C++ (two-word type name); use unsigned(1) or 1u.
dim3 dimGrid(::max(dimGridx,unsigned int(1)),::max(dimGridy,unsigned int(1)),1); // number of blocks in x,y,z (maximum of 65535 for each dimension)
hipDeviceSynchronize();
end=clock();
msecs_DataTransfer=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
start=clock();
hipfftHandle plan;
if (!myCufftSafeCall(hipfftPlan2d(&plan,params.n, params.n, HIPFFT_Z2Z)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipfftPlan2d returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// create rawSigIdx on GPU (z index into the device raw-signal buffer)
int rawSigIdx=0;
int *d_pRawSigIdx;
if (hipSuccess != hipMalloc((void**)&d_pRawSigIdx, sizeof(int)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return 0;
}
// transfer rawSig to device
if (hipSuccess != hipMemcpy(d_pRawSigIdx, &rawSigIdx, sizeof(int), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// create cog on GPU
double cog=0;
double *d_pCog;
if (hipSuccess != hipMalloc((void**)&d_pCog, sizeof(double)))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return 0;
}
// transfer cog to device
// FIXME(review): copies only sizeof(int) bytes of a double -- must be
// sizeof(double), otherwise *d_pCog is only partially initialized.
if (hipSuccess != hipMemcpy(d_pCog, &cog, sizeof(int), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
hipDeviceSynchronize();
end=clock();
msecs_DataTransfer+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
Lock lock;
int blocksPerGrid=(params.n*params.n+THREADSPERBLOCK-1)/THREADSPERBLOCK;
// allocate host memory for pupil field (reused for all debug dumps below)
hipDoubleComplex* h_pPupField=(hipDoubleComplex*)malloc(params.n*params.n*sizeof(hipDoubleComplex));
// do the simulation
for (unsigned int jy=0; jy<params.scanNumber.y; jy++)
{
for (unsigned int jx=0; jx<params.scanNumber.x; jx++)
{
start=clock();
// create pupil field according to aberrations
hipLaunchKernelGGL(( createField_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_pPupField, d_pParams);
hipDeviceSynchronize();
end=clock();
msecs_createField+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
// reset rawSigIdx
if (hipSuccess != hipMemcpy(d_pRawSigIdx, &rawSigIdx, sizeof(int), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
hipDeviceSynchronize();
// reset rawSig
if (hipSuccess != hipMemcpy(d_pRawSig, l_pRawSigInit, params.scanNumber.z*sizeof(hipDoubleComplex), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
hipDeviceSynchronize();
for (unsigned int jz=0; jz<params.scanNumber.z; jz++)
{
start=clock();
// apply defocus
hipLaunchKernelGGL(( defocField_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_pPupField, d_pParams);
hipDeviceSynchronize();
end=clock();
msecs_Defoc+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
// transfer pupil field from device
if (hipSuccess != hipMemcpy(h_pPupField, d_pPupField, params.n*params.n*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost))
return false;
char t_filename[512];
sprintf(t_filename, "E:\\pupReal%i.txt", jz);
FILE* hFile;
hFile = fopen( t_filename, "w" ) ;
// FIXME(review): returning 1 from a bool function reports success on a
// failed fopen; should be return false (applies to all debug dumps below).
if ( (hFile == NULL) )
return 1;
// NOTE(review): the dump loops below shadow the outer scan indices jx/jy.
for (unsigned int jy=0; jy<params.n; jy++)
{
for (unsigned int jx=0; jx<params.n; jx++)
{
fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].x);
}
}
fclose(hFile);
sprintf(t_filename, "E:\\pupImag%i.txt", jz);
hFile = fopen( t_filename, "w" ) ;
if ( (hFile == NULL) )
return 1;
for (unsigned int jy=0; jy<params.n; jy++)
{
for (unsigned int jx=0; jx<params.n; jx++)
{
fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].y);
}
}
fclose(hFile);
start=clock();
// note that object field is not fftshifted after call to cufft !!
if (!myCufftSafeCall(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)d_pPupField, (hipfftDoubleComplex *)d_pObjField, HIPFFT_FORWARD)))
{
// try again, just to be sure...
if (!myCufftSafeCall(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)d_pPupField, (hipfftDoubleComplex *)d_pObjField, HIPFFT_FORWARD)))
{
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pRawSig);
hipFree(d_pObjParams);
hipFree(d_pRawSigIdx);
hipfftDestroy (plan);
// NOTE(review): these buffers came from calloc -- release with free(),
// not delete (undefined behavior).
delete l_pRawSig;
delete l_pRawSigInit;
//thrust::device_free(d_pObjField_thrust);
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipfftExecZ2Z returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
}
hipDeviceSynchronize();
end=clock();
msecs_FFT+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
// transfer object field from device
if (hipSuccess != hipMemcpy(h_pPupField, d_pObjField, params.n*params.n*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost))
return false;
sprintf(t_filename, "E:\\objInReal%i.txt", jz);
hFile = fopen( t_filename, "w" ) ;
if ( (hFile == NULL) )
return 1;
for (unsigned int jy=0; jy<params.n; jy++)
{
for (unsigned int jx=0; jx<params.n; jx++)
{
fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].x);
}
}
fclose(hFile);
sprintf(t_filename, "E:\\objInImag%i.txt", jz);
hFile = fopen( t_filename, "w" ) ;
if ( (hFile == NULL) )
return 1;
for (unsigned int jy=0; jy<params.n; jy++)
{
for (unsigned int jx=0; jx<params.n; jx++)
{
fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].y);
}
}
fclose(hFile);
// do the object interaction in TEA
hipLaunchKernelGGL(( objectInteractionTEA), dim3(dimGrid),dim3(dimBlock), 0, 0, d_pObjField, d_pParams, d_pObjParams, jx);
// allocate host memory for pupil field
// transfer pupil field from device
if (hipSuccess != hipMemcpy(h_pPupField, d_pObjField, params.n*params.n*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost))
return false;
sprintf(t_filename, "E:\\objOutReal%i.txt", jz);
hFile = fopen( t_filename, "w" ) ;
if ( (hFile == NULL) )
return 1;
for (unsigned int jy=0; jy<params.n; jy++)
{
for (unsigned int jx=0; jx<params.n; jx++)
{
fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].x);
}
}
fclose(hFile);
sprintf(t_filename, "E:\\objOutImag%i.txt", jz);
hFile = fopen( t_filename, "w" ) ;
if ( (hFile == NULL) )
return 1;
for (unsigned int jy=0; jy<params.n; jy++)
{
for (unsigned int jx=0; jx<params.n; jx++)
{
fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].y);
}
}
fclose(hFile);
start=clock();
// calc the inner product on GPU
hipLaunchKernelGGL(( innerProduct), dim3(blocksPerGrid), dim3(THREADSPERBLOCK), 0, 0, lock, d_pObjField, d_pRawSig, d_pRawSigIdx, params.n*params.n);
hipLaunchKernelGGL(( incrementIdx), dim3(1),dim3(1), 0, 0, d_pRawSigIdx);
hipDeviceSynchronize();
end=clock();
msecs_Reduce+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
}
start=clock();
// copy data back from GPU
if (hipSuccess != hipMemcpy(l_pRawSig, d_pRawSig, params.scanNumber.z*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost) )
{
std::cout << "error in cu_simConfPointSensorSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return 0;
}
hipDeviceSynchronize();
end=clock();
msecs_DataTransfer+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
char t_filename[512];
sprintf(t_filename, "E:\\rawSigReal%i.txt", jx);
FILE* hFile;
hFile = fopen( t_filename, "w" ) ;
if ( (hFile == NULL) )
return 1;
for (unsigned int idz=0; idz<params.scanNumber.z; idz++)
{
fprintf(hFile, " %.16e;\n", l_pRawSig[idz].x);
}
fclose(hFile);
sprintf(t_filename, "E:\\rawSigImag%i.txt", jx);
hFile = fopen( t_filename, "w" ) ;
if ( (hFile == NULL) )
return 1;
for (unsigned int idz=0; idz<params.scanNumber.z; idz++)
{
fprintf(hFile, " %.16e;\n", l_pRawSig[idz].y);
}
fclose(hFile);
// find signal maxmium
double sigMax=0;
for (unsigned int idx=0; idx<params.scanNumber.z; idx++)
{
l_pAbsVal[idx]=pow(cuCabs(l_pRawSig[idx]),2);
sigMax=(sigMax > l_pAbsVal[idx]) ? sigMax : l_pAbsVal[idx];
}
// calc cog (centre of gravity over all z samples above half the maximum)
double nom=0;
double denom=0;
for (unsigned int idx=0; idx<params.scanNumber.z; idx++)
{
if (l_pAbsVal[idx] > sigMax/2)
{
nom+=double(idx)*l_pAbsVal[idx];
denom+=l_pAbsVal[idx];
}
}
double x=jx*params.scanStep.x;
double z0=paramsObject.A*cos(paramsObject.kN*x);
// NOTE(review): row stride uses scanNumber.y; for a row-major x/y raster the
// stride should presumably be scanNumber.x -- equivalent only when x == y.
(*ppSensorSig)[jx+jy*params.scanNumber.y]=nom/denom*params.scanStep.z-params.scanStep.z*params.scanNumber.z/2+z0;
}
}
std::cout << msecs_DataTransfer << "msec for data transfer between CPU and GPU" << "\n";
std::cout << msecs_FFT << "msec for fft" << "\n";
std::cout << msecs_Defoc << "msec for defocus kernel" << "\n";
std::cout << msecs_createField << "msec to create the field" << "\n";
std::cout << msecs_Reduce << "msec to calculate the reduction" << "\n";
std::cout << msecs_ObjectInteraction << "msec for calculating object interaction" << "\n";
// end timing
endGes=clock();
msecs=((endGes-startGes)/(double)CLOCKS_PER_SEC*1000.0);
std::cout << msecs <<" ms to simulate confocal raw signal"<< "...\n";
// NOTE(review): cleanup issues -- d_pObjParams is freed twice, d_pRawSig and
// d_pCog are never freed, h_pPupField and l_pAbsVal leak, and the calloc'd
// host buffers below must be released with free(), not delete.
hipFree(d_pObjParams);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pObjParams);
hipFree(d_pRawSigIdx);
delete l_pRawSig;
delete l_pRawSigInit;
hipfftDestroy (plan);
return true;
}
// Simulates the confocal raw signal over the full x/y/z scan using the
// lock-based innerProduct reduction: the pupil field is created once, then for
// every scan position it is defocused, FFT'd to the object plane and reduced to
// one complex sample. |raw|^2 is written into the freshly allocated array
// *ppRawSig (scanNumber.x*y*z doubles). Prints timing statistics.
// Returns true on success.
bool cu_simConfPointRawSig_wrapper(double** ppRawSig, ConfPoint_KernelParams params)
{
hipDeviceProp_t deviceProp;
hipError_t error;
// timing stuff
clock_t start, end, startGes, endGes;
double msecs_DataTransfer=0;
double msecs_DataTransfer1=0;
double msecs_Defoc=0;
double msecs_FFT=0;
double msecs_Reduce=0;
double msecs_createField=0;
double msecs=0;
// start timing
start=clock();
startGes=clock();
// check device
error = hipGetDeviceProperties(&deviceProp, 0);
if (error != hipSuccess)
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
return false;
}
// allocate host memory for raw signal
*ppRawSig=(double*)calloc(params.scanNumber.x*params.scanNumber.y*params.scanNumber.z,sizeof(double));
// allocate host memory for complex raw signal
hipDoubleComplex* l_pRawSig=(hipDoubleComplex*)calloc(params.scanNumber.x*params.scanNumber.y*params.scanNumber.z,sizeof(hipDoubleComplex));
// allocate device memory for raw signal
hipDoubleComplex* d_pRawSig;
if (hipSuccess != hipMalloc((void**)&d_pRawSig, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// transfer rawSig to device (zero-initializes the device accumulator buffer)
if (hipSuccess != hipMemcpy(d_pRawSig, l_pRawSig, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(hipDoubleComplex), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// allocate device memory for params
ConfPoint_KernelParams* d_pParams;
if (hipSuccess != hipMalloc((void**)&d_pParams, sizeof(ConfPoint_KernelParams)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// transfer params to device
// FIXME(review): "¶ms" below is encoding damage for "&params"
// (HTML entity &para;) -- restore before compiling.
if (hipSuccess != hipMemcpy(d_pParams, ¶ms, sizeof(ConfPoint_KernelParams), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// allocate device memory for pupil field
hipDoubleComplex* d_pPupField;
if (hipSuccess != hipMalloc((void**)&d_pPupField, params.n*params.n*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// allocate device memory for object field
hipDoubleComplex* d_pObjField;
if (hipSuccess != hipMalloc((void**)&d_pObjField, params.n*params.n*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// calc dimensions of kernel launch when have one kernel per element in the pupil field
dim3 dimBlock(block_size,block_size,1); // number of threads within each block in x,y,z (maximum of 512 or 1024 in total. I.e. 512,1,1 or 8,16,2 or ...
unsigned int mod= params.n % block_size;
unsigned int dimGridx;
// NOTE(review): when mod != 0 the divisor below is (1*block_size+1), i.e.
// n/(block_size+1); the intended expression is presumably
// params.n/block_size + 1 (cf. cu_simConfPointRawSig_wrapper1).
if (mod==0)
//dimGridx=params.n/(8*block_size);
dimGridx=params.n/(1*block_size);
else
dimGridx=params.n/(1*block_size+1);
unsigned int dimGridy;
if (mod==0)
dimGridy=params.n/(1*block_size);
else
dimGridy=params.n/(1*block_size+1);
// NOTE(review): "unsigned int(1)" is not a valid functional cast in standard
// C++ (two-word type name); use unsigned(1) or 1u.
dim3 dimGrid(::max(dimGridx,unsigned int(1)),::max(dimGridy,unsigned int(1)),1); // number of blocks in x,y,z (maximum of 65535 for each dimension)
hipDeviceSynchronize();
end=clock();
msecs_DataTransfer=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
start=clock();
// create pupil field according to aberrations
hipLaunchKernelGGL(( createField_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_pPupField, d_pParams);
hipDeviceSynchronize();
end=clock();
msecs_createField=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
//// allocate host memory for pupil field
//hipDoubleComplex* h_pPupField=(hipDoubleComplex*)malloc(params.n*params.n*sizeof(hipDoubleComplex));
//// transfer pupil field from device
//if (hipSuccess != hipMemcpy(h_pPupField, d_pPupField, params.n*params.n*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost))
//	return false;
//char t_filename[512];
//sprintf(t_filename, "E:\\testReal.txt");
//FILE* hFile;
//hFile = fopen( t_filename, "w" ) ;
//if ( (hFile == NULL) )
//	return 1;
//for (unsigned int jy=0; jy<params.n; jy++)
//{
//	for (unsigned int jx=0; jx<params.n; jx++)
//	{
//		fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].x);
//	}
//}
//fclose(hFile);
//sprintf(t_filename, "E:\\testImag.txt");
//hFile = fopen( t_filename, "w" ) ;
//if ( (hFile == NULL) )
//	return 1;
//for (unsigned int jy=0; jy<params.n; jy++)
//{
//	for (unsigned int jx=0; jx<params.n; jx++)
//	{
//		fprintf(hFile, " %.16e;\n", h_pPupField[jx+jy*params.n].y);
//	}
//}
//fclose(hFile);
start=clock();
hipfftHandle plan;
if (!myCufftSafeCall(hipfftPlan2d(&plan,params.n, params.n, HIPFFT_Z2Z)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipfftPlan2d returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// create rawSigIdx on GPU (running index into the device raw-signal buffer)
int rawSigIdx=0;
int *d_pRawSigIdx;
if (hipSuccess != hipMalloc((void**)&d_pRawSigIdx, sizeof(int)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return 0;
}
// transfer rawSig to device
if (hipSuccess != hipMemcpy(d_pRawSigIdx, &rawSigIdx, sizeof(int), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
hipDeviceSynchronize();
end=clock();
msecs_DataTransfer+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
Lock lock;
int blocksPerGrid=(params.n*params.n+THREADSPERBLOCK-1)/THREADSPERBLOCK;
// do the simulation
for (unsigned int jy=0; jy<params.scanNumber.y; jy++)
{
for (unsigned int jx=0; jx<params.scanNumber.x; jx++)
{
for (unsigned int jz=0; jz<params.scanNumber.z; jz++)
{
start=clock();
// apply defocus
hipLaunchKernelGGL(( defocField_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_pPupField, d_pParams);
hipDeviceSynchronize();
end=clock();
msecs_Defoc+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
start=clock();
// note that object field is not fftshifted after call to cufft !!
if (!myCufftSafeCall(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)d_pPupField, (hipfftDoubleComplex *)d_pObjField, HIPFFT_FORWARD)))
{
// try again, just to be sure...
if (!myCufftSafeCall(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)d_pPupField, (hipfftDoubleComplex *)d_pObjField, HIPFFT_FORWARD)))
{
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pRawSig);
hipFree(d_pRawSigIdx);
hipfftDestroy (plan);
//thrust::device_free(d_pObjField_thrust);
std::cout << "error in cu_simConfPointRawSig_wrapper: hipfftExecZ2Z returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
}
hipDeviceSynchronize();
end=clock();
msecs_FFT+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
start=clock();
// calc the inner product on GPU
hipLaunchKernelGGL(( innerProduct), dim3(blocksPerGrid), dim3(THREADSPERBLOCK), 0, 0, lock, d_pObjField, d_pRawSig, d_pRawSigIdx, params.n*params.n);
hipLaunchKernelGGL(( incrementIdx), dim3(1),dim3(1), 0, 0, d_pRawSigIdx);
hipDeviceSynchronize();
end=clock();
msecs_Reduce+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
}
}
}
clock_t start1=clock();
// copy data back from GPU
if (hipSuccess != hipMemcpy(l_pRawSig, d_pRawSig, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost) )
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
return 0;
}
hipDeviceSynchronize();
clock_t end1=clock();
msecs_DataTransfer1=((end1-start1)/(double)CLOCKS_PER_SEC*1000.0);
std::cout << msecs_DataTransfer << "msec for data transfer between CPU and GPU" << "\n";
std::cout << msecs_DataTransfer1 << "msec for data transfer1 between CPU and GPU" << "\n";
std::cout << msecs_FFT << "msec for fft" << "\n";
std::cout << msecs_Defoc << "msec for defocus kernel" << "\n";
std::cout << msecs_createField << "msec to create the field" << "\n";
std::cout << msecs_Reduce << "msec to calculate the reduction" << "\n";
// calc magnitude square
for (unsigned int idx=0; idx<params.scanNumber.x*params.scanNumber.y*params.scanNumber.z; idx++)
{
(*ppRawSig)[idx]=pow(cuCabs(l_pRawSig[idx]),2);
}
// end timing
endGes=clock();
msecs=((endGes-startGes)/(double)CLOCKS_PER_SEC*1000.0);
std::cout << msecs <<" ms to simulate confocal raw signal"<< "...\n";
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pRawSig);
hipFree(d_pRawSigIdx);
// NOTE(review): l_pRawSig came from calloc -- it must be released with
// free(), not delete (undefined behavior).
delete l_pRawSig;
hipfftDestroy (plan);
return true;
}
// Simulates the confocal raw signal using the overlap-reduction path
// (reduceOverlap) instead of the lock-based innerProduct kernel. On success
// *ppRawSig points to a freshly allocated array of
// scanNumber.x*scanNumber.y*scanNumber.z doubles holding the raw signal.
// Returns true on success, false on any HIP/FFT error.
bool cu_simConfPointRawSig_wrapper1(double** ppRawSig, ConfPoint_KernelParams params)
{
hipDeviceProp_t deviceProp;
hipError_t error;
// check device
error = hipGetDeviceProperties(&deviceProp, 0);
if (error != hipSuccess)
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
return false;
}
// use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// allocate device memory for params
ConfPoint_KernelParams* d_pParams;
if (hipSuccess != hipMalloc((void**)&d_pParams, sizeof(ConfPoint_KernelParams)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
return false;
}
// transfer params to device
if (hipSuccess != hipMemcpy(d_pParams, &params, sizeof(ConfPoint_KernelParams), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
hipFree(d_pParams);
return false;
}
// allocate device memory for pupil field
hipDoubleComplex* d_pPupField;
if (hipSuccess != hipMalloc((void**)&d_pPupField, params.n*params.n*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
hipFree(d_pParams);
return false;
}
// allocate device memory for object field
hipDoubleComplex* d_pObjField;
if (hipSuccess != hipMalloc((void**)&d_pObjField, params.n*params.n*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
hipFree(d_pParams);
hipFree(d_pPupField);
return false;
}
// calc dimensions of kernel launch when have one kernel per element in the pupil field
dim3 dimBlock(block_size,block_size,1); // threads per block in x,y,z (max 512/1024 in total)
unsigned int mod= params.n % block_size;
// one extra block row/column when n is not a multiple of the block size
unsigned int dimGridx = (mod==0) ? params.n/block_size : params.n/block_size+1;
unsigned int dimGridy = (mod==0) ? params.n/block_size : params.n/block_size+1;
dim3 dimGrid(::max(dimGridx,1u),::max(dimGridy,1u),1); // blocks in x,y,z (max 65535 per dimension)
// create pupil field according to aberrations
hipLaunchKernelGGL(( createField_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_pPupField, d_pParams);
hipfftHandle plan;
if (!myCufftSafeCall(hipfftPlan2d(&plan,params.n, params.n, HIPFFT_Z2Z)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipfftPlan2d returned an error " << error << " line: " << __LINE__ << "...\n";
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
return false;
}
// calc dimensions of kernel launch for reduction
int blocksReduction;
int threadsReduction;
int maxBlocks=1024; // upper bound on reduction blocks (empirical choice)
int maxThreads=deviceProp.maxThreadsPerBlock;
getNumBlocksAndThreads(3, params.n*params.n, maxBlocks, maxThreads, blocksReduction, threadsReduction);
// per-block partial sums of the reduction
hipDoubleComplex *d_pOutData;
if (hipSuccess != hipMalloc((void**)&d_pOutData, blocksReduction*sizeof(hipDoubleComplex)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
hipfftDestroy (plan);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
return false;
}
// allocate host memory for the raw signal (staging buffer used by reduceOverlap)
double *h_pRawSig=(double*)malloc(params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(double));
// allocate host memory for the caller's result buffer
*ppRawSig=(double*)malloc(params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(double));
// allocate device memory for raw signal
double* d_pRawSig;
if (hipSuccess != hipMalloc((void**)&d_pRawSig, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(double)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
free(h_pRawSig);
hipfftDestroy (plan);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pOutData);
return false;
}
// index of raw signal (running device-side write position)
unsigned int *d_index;
unsigned int index=0;
if (hipSuccess != hipMalloc((void**)&d_index, sizeof(unsigned int)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
free(h_pRawSig);
hipfftDestroy (plan);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pOutData);
hipFree(d_pRawSig);
return false;
}
if (hipSuccess != hipMemcpy(d_index, &index, sizeof(unsigned int), hipMemcpyHostToDevice))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
free(h_pRawSig);
hipfftDestroy (plan);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pOutData);
hipFree(d_pRawSig);
hipFree(d_index);
return false;
}
clock_t start, end;
double msecs=0;
// start timing
start=clock();
// do the simulation
for (unsigned int jy=0; jy<params.scanNumber.y; jy++)
{
for (unsigned int jx=0; jx<params.scanNumber.x; jx++)
{
for (unsigned int jz=0; jz<params.scanNumber.z; jz++)
{
// apply defocus
hipLaunchKernelGGL(( defocField_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_pPupField, d_pParams);
// note that object field is not fftshifted after call to cufft !!
if (!myCufftSafeCall(hipfftExecZ2Z(plan, (hipfftDoubleComplex *)d_pPupField, (hipfftDoubleComplex *)d_pObjField, HIPFFT_FORWARD)))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipfftExecZ2Z returned an error " << error << " line: " << __LINE__ << "...\n";
free(h_pRawSig);
hipfftDestroy (plan);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pOutData);
hipFree(d_pRawSig);
hipFree(d_index);
return false;
}
// do the summation
reduceOverlap(params.n*params.n, threadsReduction, blocksReduction, maxThreads, maxBlocks, d_pObjField, d_pOutData, d_pRawSig, h_pRawSig, d_index);
}
}
}
// copy the raw signal back from the GPU directly into the caller's buffer
// (previously the results ended up only in the local staging buffer and
// *ppRawSig was returned uninitialized)
if (hipSuccess != hipMemcpy(*ppRawSig, d_pRawSig, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(double), hipMemcpyDeviceToHost))
{
std::cout << "error in cu_simConfPointRawSig_wrapper: hipMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
free(h_pRawSig);
hipfftDestroy (plan);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pOutData);
hipFree(d_pRawSig);
hipFree(d_index);
return false;
}
// end timing
end=clock();
msecs=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
std::cout << msecs <<" ms to simulate confocal raw signal"<< "...\n";
// release everything (d_pOutData, d_index and h_pRawSig previously leaked)
free(h_pRawSig);
hipFree(d_pParams);
hipFree(d_pPupField);
hipFree(d_pObjField);
hipFree(d_pOutData);
hipFree(d_pRawSig);
hipFree(d_index);
hipfftDestroy (plan);
return true;
} | f321c8617d119e49cfa067fb78101a4734d24dbc.cu | #include "kernel.h"
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <ctime>
#include <algorithm>
//#include "cutil_math.h"
//#include "cutil_inline.h"
#ifndef PI
#define PI 3.14159265358979323846
#endif
#define THREADSPERBLOCK 1024
// Round x up to the next power of two; a power of two maps to itself
// (and 0 maps to 0 by way of unsigned wrap-around).
unsigned int nextPow2(unsigned int x)
{
unsigned int v = x - 1;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return v + 1;
}
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type.
// Every instantiation aliases the SAME dynamically sized shared-memory buffer
// (extern __shared__), so a kernel must only ever access it through one
// element type.
template<class T>
struct SharedMemory
{
// Implicit conversion to a pointer into the block's dynamic shared memory.
__device__ inline operator T *()
{
extern __shared__ T __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ T __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
//template<>
//struct SharedMemory<double>
//{
// __device__ inline operator double *()
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
//
// __device__ inline operator const double *() const
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
//};
// specialize for cuDoubleComplex to avoid unaligned memory
// access compile errors
// (same extern __shared__ aliasing caveat as the primary template applies)
template<>
struct SharedMemory<cuDoubleComplex>
{
__device__ inline operator cuDoubleComplex *()
{
extern __shared__ cuDoubleComplex __smem_d[];
return (cuDoubleComplex *)__smem_d;
}
__device__ inline operator const cuDoubleComplex *() const
{
extern __shared__ cuDoubleComplex __smem_d[];
return (cuDoubleComplex *)__smem_d;
}
};
// Unary functor (host/device) for use with thrust transforms.
// NOTE(review): despite the name this returns |x| -- the magnitude as the real
// part of a complex number -- NOT the squared magnitude; the commented-out line
// below suggests earlier experimentation. Confirm against callers before
// renaming or changing it.
struct squareCuDoubleComplex
{
__host__ __device__
cuDoubleComplex operator()(const cuDoubleComplex& x) const {
return make_cuDoubleComplex(cuCabs(x),0.0);
//return cuCmul(x,cuCmul(make_cuDoubleComplex(9.5367e-7,0),x));
}
};
// Binary functor: complex addition of two cuDoubleComplex values
// (suitable as the reduction operator for thrust::reduce and friends).
struct addCuDoubleComplex
{
    __host__ __device__
    cuDoubleComplex operator()(const cuDoubleComplex& lhs, const cuDoubleComplex& rhs) const {
        cuDoubleComplex sum = cuCadd(lhs, rhs);
        return sum;
    }
};
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use for the given reduction
// kernel. For kernels >= 3 (two loads per thread on the first pass) we set
// threads/block to min(maxThreads, n/2); for kernels < 3 to min(maxThreads, n).
// For kernel 6 (variable elements per thread) the block count is additionally
// capped at maxBlocks. Results are returned via the `blocks`/`threads`
// out-parameters; thread counts are rounded up to a power of two because the
// reduction kernels require it.
////////////////////////////////////////////////////////////////////////////////
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
    cudaError_t error;
    // get device capability, to avoid the block/grid size exceeding the
    // hardware limits queried below
    cudaDeviceProp prop;
    int device;
    (cudaGetDevice(&device));
    error = cudaGetDeviceProperties(&prop, device);
    if (error != cudaSuccess)
    {
        // fix: the original message named the wrong function
        std::cout << "error in getNumBlocksAndThreads: cudaGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
    }
    if (whichKernel < 3)
    {
        // one element per thread
        threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
        blocks = (n + threads - 1) / threads;
    }
    else
    {
        // kernels >= 3 consume two elements per thread on the first pass
        threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
        blocks = (n + (threads * 2 - 1)) / (threads * 2);
    }
    if (threads*blocks > prop.maxGridSize[0] * prop.maxThreadsPerBlock)
    {
        printf("n is too large, please choose a smaller number!\n");
    }
    if (blocks > prop.maxGridSize[0])
    {
        // halve the grid and double the block size to fit the device limit
        printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
               blocks, prop.maxGridSize[0], threads*2, threads);
        blocks /= 2;
        threads *= 2;
    }
    if (whichKernel == 6)
    {
        blocks = MIN(maxBlocks, blocks);
    }
}
// Complex pointwise multiplication
//__global__ void complexPointwiseMul_kernel(cuDoubleComplex* a, const cuDoubleComplex* b, int size)
//{
// const int numThreads = blockDim.x * gridDim.x;
// const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
// for (int i = threadID; i < size; i += numThreads)
// a[i] = cuCmul(a[i], b[i]);
//}
// Complex pointwise multiplication
//__global__ void complexPointwiseMulandScale_kernel(cuDoubleComplex* a, const cuDoubleComplex* b, int size, double scale)
//{
// const int numThreads = blockDim.x * gridDim.x;
// const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
// for (int i = threadID; i < size; i += numThreads)
// {
// a[i] = cuCmul(a[i], b[i]);
// a[i].x = a[i].x*scale;
// a[i].y = a[i].y*scale;
// }
//}
// create Wavefront:
// applies a defocus phase exp(-i * 2*PI/wvl * sigmaZ * scanStep.z) to each
// sample of the pupil field. The field is a 1D row-major representation of a
// 2D n x n grid (x along rows); one thread per sample, launched on a 2D grid.
// NOTE(review): the guard only bounds the flat index, so it assumes the launch
// grid does not exceed n in x — confirm at the call site.
__global__ void defocField_kernel(cuDoubleComplex* d_pField, ConfPoint_KernelParams *d_pParams)
{
    unsigned int xGes=(blockIdx.x*blockDim.x+threadIdx.x);
    unsigned int yGes=(blockIdx.y*blockDim.y+threadIdx.y);
    if (xGes+yGes*d_pParams->n < d_pParams->n*d_pParams->n)
    {
        // physical coordinates of this sample, centred on the grid
        double x=double(xGes)*(d_pParams->gridWidth/d_pParams->n)-d_pParams->gridWidth/2;
        double y=double(yGes)*(d_pParams->gridWidth/d_pParams->n)-d_pParams->gridWidth/2;
        // direction cosines towards the focus (160mm tubus length assumed)
        double sigmaX=sin(atan(-x/(160/d_pParams->magnif)));
        double sigmaY=sin(atan(-y/(160/d_pParams->magnif)));
        double sigmaZ=(1-sigmaX*sigmaX-sigmaY*sigmaY);
        // clamp evanescent components to zero before the square root
        if (sigmaZ<0)
            sigmaZ=0;
        else
            sigmaZ=sqrt(sigmaZ);
        // defocus phase for the axial scan step
        // (fix: removed a duplicated bounds check that repeated the outer
        // condition verbatim)
        double phase=-2*PI/d_pParams->wvl*sigmaZ*d_pParams->scanStep.z;
        d_pField[xGes+d_pParams->n*yGes]=cuCmul(d_pField[xGes+d_pParams->n*yGes],make_cuDoubleComplex(cos(phase),sin(phase)));
    }
}
// Thin-element-approximation (TEA) object interaction for lateral scan
// position jx: multiplies the object-plane field by the surface phase
// -k*A*cos(kN*x) of a cosine-profile object. Expects a 2D launch covering the
// n x n field (row-major, x along rows). pObjField is NOT fftshifted, so the
// x index is re-mapped below before computing the object coordinate.
__global__ void objectInteractionTEA(cuDoubleComplex* pObjField, ConfPoint_KernelParams* pParams, ConfPoint_KernelObjectParams* pObjParams, unsigned int jx)
{
    __shared__ unsigned int s_n;
    __shared__ double s_k, s_NA, s_gridWidth, s_f, s_kN, s_A, s_delta1;
    // stage frequently used parameters in shared memory; one thread loads,
    // the barrier below publishes them to the whole block
    if (threadIdx.x==0)
    {
        s_k=2*PI/pParams->wvl;
        s_NA=pParams->NA;        // NOTE(review): loaded but never read below
        s_gridWidth=pParams->gridWidth;
        s_f=160/pParams->magnif; // we assume a tubus length of 160mm here
        s_n=pParams->n;
        s_kN=pObjParams->kN;     // spatial frequency of the cosine object
        s_A=pObjParams->A;       // amplitude of the cosine object
        s_delta1=pParams->scanStep.x;
    }
    __syncthreads();
    unsigned int xGes=(blockIdx.x*blockDim.x+threadIdx.x);
    unsigned int yGes=(blockIdx.y*blockDim.y+threadIdx.y);
    unsigned int xGesObj;
    if (xGes+yGes*s_n < s_n*s_n)
    {
        // calc sampling distance in object grid
        double delta2=(2*PI/s_k)*s_f/s_gridWidth;
        // we have to consider the fact that pObjField is not fftshifted...
        if (xGes < s_n/2)
            xGesObj=xGes+s_n/2;
        else
            xGesObj=xGes-s_n/2;
        // coordinate in the fftshifted object grid, offset by the scan position
        double x=jx*s_delta1+((-double(s_n)/2)+xGesObj)*delta2;
        double phase_obj=-s_k*s_A*cos(s_kN*x);
        // multiply field with the object phase
        pObjField[xGes+yGes*s_n]=cuCmul(pObjField[xGes+yGes*s_n],make_cuDoubleComplex(cos(phase_obj),sin(phase_obj)));
    }
}
// Builds the complex pupil field for the confocal simulation. Inside the
// pupil (normalized radius rho <= 1) each sample gets:
//   - amplitude: Gaussian apodisation exp(-rho^2/apodRad^2) times sqrt(cos theta),
//   - phase: a 16-term Zernike aberration expansion plus an initial defocus
//     placing the focus at the start of the axial scan range.
// Samples outside the pupil are set to zero. Expects a 2D launch covering the
// n x n grid (row-major, x along rows).
__global__ void createField_kernel(cuDoubleComplex* pPupField, ConfPoint_KernelParams* pParams)
{
    __shared__ double s_aberrVec[16];
    __shared__ double s_gridWidth, s_magnif, s_k, s_NA, s_deltaZ, s_apodRadius, s_f;
    __shared__ unsigned int s_n;
    // load aberration coeffs into shared memory; threads with the same
    // threadIdx.x across rows write identical values, which is benign
    if (threadIdx.x < 16)
    {
        // NOTE(review): s_aberrVec (and s_magnif) are staged here but the
        // phase computation below reads pParams->pAberrVec directly — the
        // shared copies are unused.
        s_aberrVec[threadIdx.x]=pParams->pAberrVec[threadIdx.x];
        if (threadIdx.x==0)
        {
            s_gridWidth=pParams->gridWidth;
            s_magnif=pParams->magnif;
            s_k=2*PI/pParams->wvl;
            s_NA=pParams->NA;
            s_n=pParams->n;
            // defocus to the start of the axial scan (half the scan range)
            s_deltaZ=-pParams->scanStep.z*pParams->scanNumber.z/2;
            s_apodRadius=pParams->apodisationRadius;
            s_f=160/pParams->magnif;// we assume a tubus length of 160mm here to calculate the focal length of the objective lens
        }
    }
    __syncthreads();
    unsigned int xGes=(blockIdx.x*blockDim.x+threadIdx.x);
    unsigned int yGes=(blockIdx.y*blockDim.y+threadIdx.y);
    if (xGes+yGes*s_n < s_n*s_n)
    {
        // physical coordinates of this sample in the pupil grid
        double x=double(xGes)*(s_gridWidth/s_n)-s_gridWidth/2;
        double y=double(yGes)*(s_gridWidth/s_n)-s_gridWidth/2;
        // pupil diameter from the numerical aperture
        double wPup=tan(asin(s_NA))*2*s_f;
        double rho=sqrt(x*x+y*y)/(wPup/2); // normalized radial coordinate in pupil
        double apodRad=s_apodRadius/(wPup/2); // normalized apodisation radius
        double phi=atan2(y,x);
        // direction cosines for the initial defocus term
        double sigmaX=sin(atan(-x/s_f));
        double sigmaY=sin(atan(-y/s_f));
        double sigmaZ=(1-sigmaX*sigmaX-sigmaY*sigmaY);
        if (sigmaZ>=0)
            sigmaZ=sqrt(sigmaZ);
        else
            sigmaZ=0; // clamp evanescent components
        double cosThetaZ=abs(s_f/sqrt(x*x+y*y+s_f*s_f));
        double apod=sqrt(cosThetaZ);
        if (rho<=1)
        {
            // defocus phase plus the Zernike aberration polynomial
            double phase_defoc=-s_k*sigmaZ*s_deltaZ;
            double phase_aberr=s_k*(pParams->pAberrVec[0]
                                    +pParams->pAberrVec[1]*rho*cos(phi)
                                    +pParams->pAberrVec[2]*rho*sin(phi)
                                    +pParams->pAberrVec[3]*(2*rho*rho-1)
                                    +pParams->pAberrVec[4]*(rho*rho*cos(2*phi))
                                    +pParams->pAberrVec[5]*(rho*rho*sin(2*phi))
                                    +pParams->pAberrVec[6]*(3*pow(rho,3)-2*rho)*cos(phi)
                                    +pParams->pAberrVec[7]*(3*pow(rho,3)-2*rho)*sin(phi)
                                    +pParams->pAberrVec[8]*(6*pow(rho,4)-6*rho*rho+1)
                                    +pParams->pAberrVec[9]*pow(rho,3)*cos(3*phi)
                                    +pParams->pAberrVec[10]*pow(rho,3)*sin(3*phi)
                                    +pParams->pAberrVec[11]*(4*pow(rho,4)-3*pow(rho,2))*cos(2*phi)
                                    +pParams->pAberrVec[12]*(4*pow(rho,4)-3*pow(rho,2))*sin(2*phi)
                                    +pParams->pAberrVec[13]*(10*pow(rho,5)-12*pow(rho,3)+3*rho)*cos(phi)
                                    +pParams->pAberrVec[14]*(10*pow(rho,5)-12*pow(rho,3)+3*rho)*sin(phi)
                                    +pParams->pAberrVec[15]*(20*pow(rho,6)-30*pow(rho,4)+12*pow(rho,2)-1));
            double ampl=exp(-rho*rho/(apodRad*apodRad));
            // create real and imaginary part of field with apodised modulus and our phase
            pPupField[xGes+yGes*s_n]=make_cuDoubleComplex(apod*ampl*cos(phase_aberr+phase_defoc),apod*ampl*sin(phase_aberr+phase_defoc));
            //pPupField[xGes+yGes*pParams->n]=make_cuDoubleComplex(1.0,0.0);
        }
        else
            pPupField[xGes+yGes*s_n]=make_cuDoubleComplex(0, 0);
    }
}
//__global__ void kernel(int *a, int*b)
//{
// int tx = threadIdx.x;
//
// switch(tx)
// {
// case 0:
// *a=*a+10;
// break;
// case 1:
// *b=*b+3;
// break;
// default:
// break;
// }
//}
// Pre-FFT scaling step of a scalar Richardson-Wolf focal-field computation:
// fills the observation-plane coordinate arrays x2/y2 and multiplies each
// pupil sample by -i * f / (1-sx^2-sy^2)^(1/4) * exp(i*k*Dz*sqrt(1-sx^2-sy^2));
// evanescent components (1-sx^2-sy^2 < 0) are zeroed. One thread per sample,
// no bounds check — the launch grid must match dimx x dimy exactly.
// NOTE(review): the flat index jx+jy*dimy mixes the x index with the y
// dimension; this is only consistent because the host wrapper enforces
// dimx==dimy — confirm before reusing with rectangular grids.
__global__ void scalar_RichardsonWolf_kernel(cuDoubleComplex* Uin_ptr, double* x1_ptr, double* y1_ptr, double* x2_ptr, double*y2_ptr, unsigned int dimx, unsigned int dimy, unsigned int TileWidth, unsigned int TileHeight, double wvl, double f, double Dz)
{
    unsigned int jx=blockIdx.x*TileWidth+threadIdx.x;
    unsigned int jy=blockIdx.y*TileHeight+threadIdx.y;
    // input-plane sampling distances (assumed uniform across the grid)
    double dx1=abs(x1_ptr[1]-x1_ptr[0]);
    double dy1=abs(y1_ptr[1]-y1_ptr[0]);
    // observation-plane coordinates; every row writes x2[jx] (and every
    // column y2[jy]) with identical values, so the redundancy is benign
    x2_ptr[jx]=(-1.0*dimx/2+jx)/(dimx*dx1)*wvl*f;
    y2_ptr[jy]=(-1.0*dimy/2+jy)/(dimy*dy1)*wvl*f;
    // direction cosines of this pupil sample
    double sigmaX=-x1_ptr[jx]/f;
    double sigmaY=-y1_ptr[jy]/f;
    double GktSqr=1-sigmaX*sigmaX-sigmaY*sigmaY;
    // free space propagation filters out evanescent waves...
    if (GktSqr<0)
    {
        GktSqr=0.0;
        Uin_ptr[jx+jy*dimy]=make_cuDoubleComplex(0.0,0.0);
    }
    else
    {
        // this looks kind of ugly because cudas complex<double> implementation doesn't have any operator notation...
        //Uin_ptr[jx+jy*dimy]=make_cuDoubleComplex(0.0,-1.0)*f*Uin_ptr[jx+jy*dimy]/pow(make_cuDoubleComplex(1-sigmaX*sigmaX-sigmaY*sigmaY,0.0),0.25)*make_cuDoubleComplex(cos(2*PI/wvl*Dz*sqrt(GktSqr)),sin(2*PI/wvl*Dz*sqrt(GktSqr)));
        cuDoubleComplex help=cuCmul(make_cuDoubleComplex(f/pow(1-sigmaX*sigmaX-sigmaY*sigmaY,0.25),0.0),Uin_ptr[jx+jy*dimy]);
        help=cuCmul(make_cuDoubleComplex(0.0,-1.0),help);
        Uin_ptr[jx+jy*dimy]=cuCmul(help,make_cuDoubleComplex(cos(2*PI/wvl*Dz*sqrt(GktSqr)),sin(2*PI/wvl*Dz*sqrt(GktSqr))));
    }
}
//****************************************************************/
// wrappers
//****************************************************************/
// Host wrapper for the scalar Richardson-Wolf focal-field computation:
// uploads the pupil field and its coordinates, runs the scaling kernel,
// forward-FFTs the field in place on the GPU and copies the (un-fftshifted)
// result back into Uin_ptr. The observation-plane coordinate arrays are
// calloc'd here and returned via x2_ptrptr/y2_ptrptr — the caller owns and
// must free them. Returns 0 for non-square grids or unequal x/y sampling,
// 1 otherwise.
// NOTE(review): no CUDA/cuFFT return codes are checked; `abs` on the double
// differences may resolve to the integer overload depending on headers
// (consider fabs); the 16x16 tiling requires dimx/dimy to be multiples of 16
// or edge samples are never processed — confirm at call sites.
bool cu_scalarRichardsonWolf_wrapper(cuDoubleComplex* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double f, double Dz, double** x2_ptrptr, double** y2_ptrptr)
{
    // we handle only regularly squared grids here
    if (dimx!=dimy)
        return 0;
    double k=2*PI/wvl;  // NOTE(review): computed but unused below
    double dx1=abs(x1_ptr[0]-x1_ptr[1]);
    double dy1=abs(y1_ptr[0]-y1_ptr[1]);
    // exact floating-point comparison: sampling must match bit-for-bit
    if (dx1!=dy1)
        return 0;
    // transfer data to GPU
    double* x2_kernel_ptr;
    (cudaMalloc((void**)&x2_kernel_ptr, sizeof(double)*dimx));
    double* y2_kernel_ptr;
    (cudaMalloc((void**)&y2_kernel_ptr, sizeof(double)*dimy));
    double* x1_kernel_ptr;
    (cudaMalloc((void**)&x1_kernel_ptr, sizeof(double)*dimx));
    (cudaMemcpy(x1_kernel_ptr, x1_ptr, sizeof(double)*dimx, cudaMemcpyHostToDevice));
    double* y1_kernel_ptr;
    (cudaMalloc((void**)&y1_kernel_ptr, sizeof(double)*dimy));
    (cudaMemcpy(y1_kernel_ptr, y1_ptr, sizeof(double)*dimy, cudaMemcpyHostToDevice));
    cuDoubleComplex* Uin_kernel_ptr;
    (cudaMalloc((void**)&Uin_kernel_ptr, sizeof(cuDoubleComplex)*dimx*dimy));
    (cudaMemcpy(Uin_kernel_ptr, Uin_ptr, sizeof(cuDoubleComplex)*dimx*dimy, cudaMemcpyHostToDevice));
    unsigned int tileWidth=16;
    unsigned int tileHeight=16;
    dim3 dimBlock(tileWidth,tileHeight,1); // number of threads within each block in x,y,z (maximum of 512 in total. I.e. 512,1,1 or 8,16,2 or ...
    dim3 dimGrid(dimx/tileWidth,dimy/tileHeight,1); // number of blocks in x,y,z (maximum of 65535 for each dimension)
    scalar_RichardsonWolf_kernel<<<dimGrid, dimBlock>>>((cuDoubleComplex*)Uin_kernel_ptr, x1_kernel_ptr, y1_kernel_ptr, x2_kernel_ptr, y2_kernel_ptr, dimx, dimy, tileWidth, tileHeight, wvl, f, Dz);
    // allocate host memory for observation plane coordinates (caller frees)
    double *x2_l=(double*)calloc(dimx,sizeof(double));
    double *y2_l=(double*)calloc(dimy,sizeof(double));
    // transfer coordinates from GPU (cudaMemcpy also synchronizes with the kernel)
    (cudaMemcpy(x2_l, x2_kernel_ptr, sizeof(double)*dimx, cudaMemcpyDeviceToHost));
    (cudaMemcpy(y2_l, y2_kernel_ptr, sizeof(double)*dimy, cudaMemcpyDeviceToHost));
    // deallocate coordinates on GPU
    cudaFree(x1_kernel_ptr);
    cudaFree(x2_kernel_ptr);
    cudaFree(y1_kernel_ptr);
    cudaFree(y2_kernel_ptr);
    // plan and execute the in-place forward FFT on the scaled field
    cufftHandle plan;
    (cufftPlan2d(&plan,dimx, dimy, CUFFT_Z2Z));
    (cufftExecZ2Z(plan, (cufftDoubleComplex *)Uin_kernel_ptr, (cufftDoubleComplex *)Uin_kernel_ptr, CUFFT_FORWARD));
    // transfer optical field from GPU
    (cudaMemcpy(Uin_ptr, Uin_kernel_ptr, sizeof(cuDoubleComplex)*dimy*dimx, cudaMemcpyDeviceToHost));
    // deallocate optical field on GPU
    cudaFree(Uin_kernel_ptr);
    // destroy fft plan
    cufftDestroy(plan);
    // return pointers to the new coordinate arrays
    *x2_ptrptr=x2_l;
    *y2_ptrptr=y2_l;
    return 1;
}
//
//void kernel_wrapper(int *a, int* b)
//{
// int *d_1, *d_2;
// dim3 threads( 2, 1);
// dim3 blocks( 1, 1);
//
// cudaMalloc( (void **)&d_1, sizeof(int) );
// cudaMalloc( (void **)&d_2, sizeof(int) );
//
// cudaMemcpy( d_1, a, sizeof(int), cudaMemcpyHostToDevice );
// cudaMemcpy( d_2, b, sizeof(int), cudaMemcpyHostToDevice );
//
// kernel<<< blocks, threads >>>( a, b );
//
// cudaMemcpy( a, d_1, sizeof(int), cudaMemcpyDeviceToHost );
// cudaMemcpy( b, d_2, sizeof(int), cudaMemcpyDeviceToHost );
//
// cudaFree(d_1);
// cudaFree(d_2);
//}
//
// Forward-FFTs the dimx x dimy field on the GPU and writes the spectrum back
// into Uin_ptr in place. Always returns true.
// NOTE(review): the propagation implied by the name and signature (wvl,
// x1/y1, Dz, dx2, x2/y2 outputs) is not implemented — those parameters are
// currently ignored and only the FFT is performed.
bool cu_angularSpectrumScaled_wrapper(cuDoubleComplex* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double dx2, double** x2_ptrptr, double** y2_ptrptr)
{
    // Allocate device memory for the field
    cuDoubleComplex* Uin_kernel_ptr;
    (cudaMalloc((void**)&Uin_kernel_ptr, sizeof(cuDoubleComplex)*dimx*dimy));
    // Copy host memory to device
    (cudaMemcpy(Uin_kernel_ptr, Uin_ptr, sizeof(cuDoubleComplex)*dimx*dimy,
                cudaMemcpyHostToDevice));
    // CUFFT plan (double-precision complex-to-complex)
    cufftHandle plan;
    (cufftPlan2d(&plan,dimx, dimy, CUFFT_Z2Z));
    // Transform the signal in place
    // (fix: the original message named cufftExecC2C although Z2Z is executed)
    printf("Transforming signal cufftExecZ2Z\n");
    (cufftExecZ2Z(plan, (cufftDoubleComplex *)Uin_kernel_ptr, (cufftDoubleComplex *)Uin_kernel_ptr, CUFFT_FORWARD));
    // copy device memory back to host
    (cudaMemcpy(Uin_ptr, Uin_kernel_ptr, sizeof(cuDoubleComplex)*dimx*dimy, cudaMemcpyDeviceToHost));
    // fix: the original leaked both the device buffer and the cuFFT plan
    cufftDestroy(plan);
    cudaFree(Uin_kernel_ptr);
    return true;
}
//
//bool cu_angularSpectrum_ABCD(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double dx2, double* ABCD, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_fresnel_two_step_1D(complex<double>* Uin_ptr, unsigned int dimx, double wvl, double* x1_ptr, double dx2, double Dz, double** x2_ptrptr )
//{
// return true;
//}
//
//bool cu_fresnel_two_step(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double dx2, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_fresnel_one_step_1D(complex<double>* Uin_ptr, unsigned int dimx, double wvl, double* x1_ptr, double Dz, double** x2_ptrptr )
//{
// return true;
//}
//
//bool cu_fresnel_one_step(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_scalar_RichardsonWolf(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double f, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// // Allocate device memory
// complex<double>* Uin_kernel_ptr;
// cutilSafeCall(cudaMalloc((void**)&Uin_kernel_ptr, sizeof(complex<double>)*dimx*dimy));
//
// // Copy host memory to device
// cutilSafeCall(cudaMemcpy(Uin_kernel_ptr, Uin_ptr, sizeof(complex<double>)*dimx*dimy, cudaMemcpyHostToDevice));
//
// // Allocate device memory
// double* x1_kernel_ptr;
// cutilSafeCall(cudaMalloc((void**)&x1_kernel_ptr, sizeof(double)*dimx));
//
// // Copy host memory to device
// cutilSafeCall(cudaMemcpy(x1_kernel_ptr, x1_ptr, sizeof(double)*dimx,
// cudaMemcpyHostToDevice));
//
// // Allocate device memory
// double* y1_kernel_ptr;
// cutilSafeCall(cudaMalloc((void**)&y1_kernel_ptr, sizeof(double)*dimy));
//
// // Copy host memory to device
// cutilSafeCall(cudaMemcpy(y1_kernel_ptr, y1_ptr, sizeof(double)*dimy,
// cudaMemcpyHostToDevice));
//
// // allocate host memory
// *x2_ptrptr=(double*)calloc(dimx,sizeof(double));
// *y2_ptrptr=(double*)calloc(dimy,sizeof(double));
//
// // Allocate device memory
// double* x2_kernel_ptr;
// cutilSafeCall(cudaMalloc((void**)&x2_kernel_ptr, sizeof(double)*dimx));
//
// // Allocate device memory
// double* y2_kernel_ptr;
// cutilSafeCall(cudaMalloc((void**)&y2_kernel_ptr, sizeof(double)*dimy));
// // do the scaling
// cu_scalar_RichardsonWolf_kernel<<<32,512>>>(reinterpret_cast<cufftDoubleComplex*>(Uin_kernel_ptr), dimx, dimy, wvl, x1_kernel_ptr, y1_kernel_ptr, f, Dz, x2_kernel_ptr, y2_kernel_ptr);
//
// // do the fft
// // CUFFT plan
// cufftHandle plan;
// cufftSafeCall(cufftPlan2d(&plan,dimx, dimy, CUFFT_Z2Z));
//
// // execution
// cufftSafeCall(cufftExecZ2Z(plan, (cufftDoubleComplex *)Uin_kernel_ptr, (cufftDoubleComplex *)Uin_kernel_ptr, CUFFT_FORWARD));
//
// // do the ffthift in a kernel....
//
// // copy device memory back to host
// cutilSafeCall(cudaMemcpy(Uin_ptr, Uin_kernel_ptr, sizeof(complex<double>)*dimx*dimy, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(*x2_ptrptr, x2_kernel_ptr, sizeof(double)*dimx, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(*y2_ptrptr, y2_kernel_ptr, sizeof(double)*dimy, cudaMemcpyDeviceToHost));
//
// return true;
//}
//
//bool cu_fraunhofer(complex<double>* Uin_ptr, unsigned int dimx, unsigned int dimy, double wvl, double* x1_ptr, double* y1_ptr, double Dz, double** x2_ptrptr, double** y2_ptrptr)
//{
// return true;
//}
//
//bool cu_fraunhofer_1D(complex<double>* Uin_ptr, unsigned int dimx, double wvl, double* x1_ptr, double Dz, double** x2_ptrptr)
//{
// return true;
//}
//
//bool cu_fftshift(complex<double>* in, unsigned int dimx, unsigned int dimy)
//{
// return true;
//}
//
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
// First-level reduction for the overlap integral: uses n/2 threads, each
// loading up to two elements from global memory, forming the complex product
// x*x for each (note: x*x, not |x|^2), then tree-reducing within the block in
// dynamic shared memory. Writes one partial sum per block to g_odata.
// Requires blockDim.x to be a power of two; the host sizes the shared memory
// at launch (doubled for blocks of <= 32 threads to avoid OOB indexing).
__global__ void reduce_kernel3_overlap(cuDoubleComplex *g_idata, cuDoubleComplex *g_odata, unsigned int n)
{
    cuDoubleComplex *sdata = SharedMemory<cuDoubleComplex>();
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    cuDoubleComplex mySum = (i < n) ? cuCmul(g_idata[i],g_idata[i]) : make_cuDoubleComplex(0.0,0.0);
    if (i + blockDim.x < n)
        mySum = cuCadd(mySum, cuCmul(g_idata[i+blockDim.x],g_idata[i+blockDim.x]));
    sdata[tid] = mySum;
    __syncthreads();
    // tree reduction in shared memory; a barrier every step keeps it race-free
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] = mySum = cuCadd(mySum, sdata[tid + s]);
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
// Final reduction pass: sums the remaining partials (two loads per thread,
// then a shared-memory tree reduction) and stores |sum|^2 into the raw-signal
// array at the current write index, then advances the index.
// NOTE(review): intended to run as a SINGLE block (reduceOverlap only calls
// it when l_blocks==1); with more blocks the unsynchronised read-increment of
// *g_index would race. Requires blockDim.x to be a power of two.
__global__ void reduce_kernel3_final(cuDoubleComplex *g_idata, double *g_odata, unsigned int *g_index, unsigned int n)
{
    cuDoubleComplex *sdata = SharedMemory<cuDoubleComplex>();
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    cuDoubleComplex mySum = (i < n) ? g_idata[i] : make_cuDoubleComplex(0.0,0.0);
    if (i + blockDim.x < n)
        mySum = cuCadd(mySum, g_idata[i+blockDim.x]);
    sdata[tid] = mySum;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] = mySum = cuCadd(mySum, sdata[tid + s]);
        }
        __syncthreads();
    }
    // thread 0 appends |sum|^2 to the raw signal and bumps the write index
    if (tid == 0)
    {
        g_odata[(*g_index)] = cuCabs(sdata[0])*cuCabs(sdata[0]);
        *g_index=*g_index+1; // increment index
    }
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
// Intermediate reduction pass: plain complex sum (no squaring) of up to two
// elements per thread, tree-reduced per block in dynamic shared memory; one
// partial sum per block is written to g_odata. Safe to call with
// g_odata == g_idata because each block only writes slot blockIdx.x after
// reading its own input range. Requires blockDim.x to be a power of two.
__global__ void reduce_kernel3(cuDoubleComplex *g_idata, cuDoubleComplex *g_odata, unsigned int n)
{
    cuDoubleComplex *sdata = SharedMemory<cuDoubleComplex>();
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    cuDoubleComplex mySum = (i < n) ? g_idata[i] : make_cuDoubleComplex(0.0,0.0);
    if (i + blockDim.x < n)
        mySum = cuCadd(mySum, g_idata[i+blockDim.x]);
    sdata[tid] = mySum;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] = mySum = cuCadd(mySum, sdata[tid + s]);
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
// Reduction kernel 6 from the CUDA SDK samples: each thread sums multiple
// elements via a grid-sized stride (Brent's theorem optimization), then the
// block reduces in shared memory with a fully unrolled tree, finishing the
// last 32 elements warp-synchronously through a volatile pointer.
// Needs a minimum of 64*sizeof(T) bytes of shared memory (i.e. allocate
// 64*sizeof(T) when blockSize <= 32, otherwise blockSize*sizeof(T)).
// NOTE(review): the tail relies on implicit warp-synchronous execution; on
// Volta+ (independent thread scheduling) this needs __syncwarp()/ *_sync
// intrinsics — confirm the target architecture before reuse.
template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n)
{
    T *sdata = SharedMemory<T>();
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x;
    T mySum = 0;
    // we reduce multiple elements per thread. The number is determined by the
    // number of active thread blocks (via gridDim). More blocks will result
    // in a larger gridSize and therefore fewer elements per thread
    while (i < n)
    {
        mySum += g_idata[i];
        // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
        if (nIsPow2 || i + blockSize < n)
            mySum += g_idata[i+blockSize];
        i += gridSize;
    }
    // each thread puts its local sum into shared memory
    sdata[tid] = mySum;
    __syncthreads();
    // do reduction in shared mem (fully unrolled over the compile-time blockSize)
    if (blockSize >= 512)
    {
        if (tid < 256)
        {
            sdata[tid] = mySum = mySum + sdata[tid + 256];
        }
        __syncthreads();
    }
    if (blockSize >= 256)
    {
        if (tid < 128)
        {
            sdata[tid] = mySum = mySum + sdata[tid + 128];
        }
        __syncthreads();
    }
    if (blockSize >= 128)
    {
        if (tid < 64)
        {
            sdata[tid] = mySum = mySum + sdata[tid + 64];
        }
        __syncthreads();
    }
    if (tid < 32)
    {
        // now that we are using warp-synchronous programming (below)
        // we need to declare our shared memory volatile so that the compiler
        // doesn't reorder stores to it and induce incorrect behavior.
        volatile T *smem = sdata;
        if (blockSize >= 64)
        {
            smem[tid] = mySum = mySum + smem[tid + 32];
        }
        if (blockSize >= 32)
        {
            smem[tid] = mySum = mySum + smem[tid + 16];
        }
        if (blockSize >= 16)
        {
            smem[tid] = mySum = mySum + smem[tid + 8];
        }
        if (blockSize >= 8)
        {
            smem[tid] = mySum = mySum + smem[tid + 4];
        }
        if (blockSize >= 4)
        {
            smem[tid] = mySum = mySum + smem[tid + 2];
        }
        if (blockSize >= 2)
        {
            smem[tid] = mySum = mySum + smem[tid + 1];
        }
    }
    // write result for this block to global mem
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
////////////////////////////////////////////////////////////////////////////////
// This function performs a reduction of the input data multiple times and
// measures the average reduction time.
////////////////////////////////////////////////////////////////////////////////
// Reduces the n-element complex field d_idata down to one scalar and appends
// it to the device raw-signal array:
//   1) reduce_kernel3_overlap forms complex x*x per element and block-reduces
//      the products into d_odata,
//   2) reduce_kernel3 repeatedly folds the per-block partials,
//   3) reduce_kernel3_final (launched as a single block) stores |sum|^2 at
//      d_rawSig[*d_index] and increments *d_index.
// numThreads/numBlocks must come from getNumBlocksAndThreads(3, n, ...).
// NOTE(review): only d_rawSig[0] is copied back into h_rawSig, not the
// element just written — confirm this matches the caller's expectation.
void reduceOverlap(int n,
                   int numThreads,
                   int numBlocks,
                   int maxThreads,
                   int maxBlocks,
                   cuDoubleComplex *d_idata,
                   cuDoubleComplex *d_odata,
                   double *d_rawSig,
                   double *h_rawSig,
                   unsigned int *d_index)
{
    dim3 dimBlock(numThreads,1,1);
    dim3 dimGrid(numBlocks, 1, 1);
    // when there is only one warp per block, we need to allocate two warps
    // worth of shared memory so that we don't index shared memory out of bounds
    int smemSize = (numThreads <= 32) ? 2 * numThreads * sizeof(cuDoubleComplex) : numThreads * sizeof(cuDoubleComplex);
    // first pass: square (complex x*x) and reduce within each block
    reduce_kernel3_overlap<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, n);
    // debug readback of the per-block partials
    // (fix: the original malloc'd this buffer and never freed it)
    cuDoubleComplex *h_pOutData;
    h_pOutData=(cuDoubleComplex*)malloc(numBlocks*sizeof(cuDoubleComplex));
    cudaMemcpy(h_pOutData, d_odata, numBlocks*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
    free(h_pOutData);
    // sum partial block sums on GPU
    int s=numBlocks;
    while (s > 1)
    {
        int l_threads = 0, l_blocks = 0;
        getNumBlocksAndThreads(3, s, maxBlocks, maxThreads, l_blocks, l_threads);
        dim3 l_dimBlock(l_threads,1,1);
        dim3 l_dimGrid(l_blocks,1,1);
        // shared memory sized for THIS launch's block size
        // (fix: the original sized it from the outer numThreads)
        int l_smemSize = (l_threads <= 32) ? 2 * l_threads * sizeof(cuDoubleComplex) : l_threads * sizeof(cuDoubleComplex);
        // the last reduction executes in only one block; its result is the
        // final overlap integral and is appended to the device raw signal
        if (l_blocks==1)
            reduce_kernel3_final<<<l_dimGrid, l_dimBlock, l_smemSize>>>(d_odata, d_rawSig, d_index, s);
        else
            // execute pure reduction kernel
            reduce_kernel3<<<l_dimGrid, l_dimBlock, l_smemSize>>>(d_odata, d_odata, s);
        // update number of remaining partial sums
        s = (s + (l_threads*2-1)) / (l_threads*2);
    }
    // copy final sum from device to host (also synchronizes with the kernels)
    cudaMemcpy(h_rawSig, d_rawSig, sizeof(double), cudaMemcpyDeviceToHost);
}
///*
// This version adds multiple elements per thread sequentially. This reduces the overall
// cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
// (Brent's Theorem optimization)
//
// Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
// In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
// If blockSize > 32, allocate blockSize*sizeof(T) bytes.
//*/
//template <unsigned int blockSize, bool nIsPow2> __global__ void calcOverlap(cuDoubleComplex *g_idata, cuDoubleComplex *g_odata, unsigned int n)
//{
// cuDoubleComplex *sdata = SharedMemory<cuDoubleComplex>();
//
// // perform first level of reduction,
// // reading from global memory, writing to shared memory
// unsigned int tid = threadIdx.x;
// unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
// unsigned int gridSize = blockSize*2*gridDim.x;
//
// cuDoubleComplex mySum = make_cuDoubleComplex(0.0,0.0);
//
// // we reduce multiple elements per thread. The number is determined by the
// // number of active thread blocks (via gridDim). More blocks will result
// // in a larger gridSize and therefore fewer elements per thread
// while (i < n)
// {
// mySum = cuCadd(mySum, cuCmul(g_idata[i],g_idata[i]));
//
// // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
// if (nIsPow2 || i + blockSize < n)
// mySum = cuCadd(mySum, cuCmul(g_idata[i+blockSize],g_idata[i+blockSize]));
//
// i += gridSize;
// }
//
// // each thread puts its local sum into shared memory
// sdata[tid] = mySum;
// __syncthreads();
//
//
// // do reduction in shared mem
// if (blockSize >= 512)
// {
// if (tid < 256)
// {
// sdata[tid] = mySum = cuCadd(mySum,sdata[tid + 256]);
// }
//
// __syncthreads();
// }
//
// if (blockSize >= 256)
// {
// if (tid < 128)
// {
// sdata[tid] = mySum = cuCadd(mySum, sdata[tid + 128]);
// }
//
// __syncthreads();
// }
//
// if (blockSize >= 128)
// {
// if (tid < 64)
// {
// sdata[tid] = mySum = cuCadd(mySum, sdata[tid + 64]);
// }
//
// __syncthreads();
// }
//
// if (tid < 32)
// {
// // now that we are using warp-synchronous programming (below)
// // we need to declare our shared memory volatile so that the compiler
// // doesn't reorder stores to it and induce incorrect behavior.
// //volatile cuDoubleComplex *smem = sdata;
// volatile cuDoubleComplex *smem = sdata;
//
// if (blockSize >= 64)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 32]);
// }
//
// if (blockSize >= 32)
// {
// mySum=cuCadd(mySum, smem[tid + 16]);
// smem[tid] = mySum;// = cuCadd(mySum, smem[tid + 16]);
// }
//
// if (blockSize >= 16)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 8]);
// }
//
// if (blockSize >= 8)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 4]);
// }
//
// if (blockSize >= 4)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 2]);
// }
//
// if (blockSize >= 2)
// {
// smem[tid] = mySum = cuCadd(mySum, smem[tid + 1]);
// }
// }
//
// // write result for this block to global mem
// //if (tid == 0)
// // g_odata[blockIdx.x] = sdata[0];
//}
// Self-test for the GPU overlap reduction: fills a 1M-element vector with
// (2+0i), uploads it and runs reduceOverlap on it. Returns 1.0 on success,
// 0 on any CUDA error. Debugging aid only — the numeric result is discarded.
double cu_testReduce_wrapper()
{
    cudaDeviceProp deviceProp;
    cudaError_t error;
    // check device
    error = cudaGetDeviceProperties(&deviceProp, 0);
    if (error != cudaSuccess)
    {
        // fix: messages below named the wrong function in the original
        std::cout << "error in cu_testReduce_wrapper: cudaGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
        return 0;
    }
    int n=1024*1024;
    // host-side test vector: every element 2+0i
    cuDoubleComplex *inData;
    inData=(cuDoubleComplex*)malloc(n*sizeof(cuDoubleComplex));
    for (int i=0; i<n; ++i)
    {
        inData[i]=make_cuDoubleComplex(2.0,0.0);
    }
    cuDoubleComplex *d_inData;
    error = cudaMalloc((void**)&d_inData, n*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_testReduce_wrapper: cudaMalloc returned error code " << error << " line: " << __LINE__ << "...\n";
        free(inData);
        return 0;
    }
    // fix: the original never uploaded the test data, so the reduction ran on
    // uninitialised device memory
    error = cudaMemcpy(d_inData, inData, n*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_testReduce_wrapper: cudaMemcpy returned error code " << error << " line: " << __LINE__ << "...\n";
        cudaFree(d_inData);
        free(inData);
        return 0;
    }
    // device-side raw-signal accumulator, initialised to 0
    double *d_rawSig;
    double rawSig=0.0;
    error = cudaMalloc((void**)&d_rawSig, sizeof(double));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_testReduce_wrapper: cudaMalloc returned error code " << error << " line: " << __LINE__ << "...\n";
        cudaFree(d_inData);
        free(inData);
        return 0;
    }
    error = cudaMemcpy(d_rawSig, &rawSig, sizeof(double), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_testReduce_wrapper: cudaMemcpy returned error code " << error << " line: " << __LINE__ << "...\n";
        cudaFree(d_rawSig);
        cudaFree(d_inData);
        free(inData);
        return 0;
    }
    // device-side write index into the raw signal, initialised to 0
    unsigned int *d_index;
    unsigned int index=0;
    error = cudaMalloc((void**)&d_index, sizeof(unsigned int));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_testReduce_wrapper: cudaMalloc returned error code " << error << " line: " << __LINE__ << "...\n";
        cudaFree(d_rawSig);
        cudaFree(d_inData);
        free(inData);
        return 0;
    }
    error = cudaMemcpy(d_index, &index, sizeof(unsigned int), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_testReduce_wrapper: cudaMemcpy returned error code " << error << " line: " << __LINE__ << "...\n";
        cudaFree(d_index);
        cudaFree(d_rawSig);
        cudaFree(d_inData);
        free(inData);
        return 0;
    }
    // launch configuration for the first reduction pass
    int blocksReduction;
    int threadsReduction;
    int maxBlocks=deviceProp.maxGridSize[0];
    int maxThreads=deviceProp.maxThreadsPerBlock;
    getNumBlocksAndThreads(3, n, maxBlocks, maxThreads, blocksReduction, threadsReduction);
    // per-block partial sums
    cuDoubleComplex *d_outData;
    error = cudaMalloc((void**)&d_outData, blocksReduction*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_testReduce_wrapper: cudaMalloc returned error code " << error << " line: " << __LINE__ << "...\n";
        cudaFree(d_index);
        cudaFree(d_rawSig);
        cudaFree(d_inData);
        free(inData);
        return 0;
    }
    double outData=0;
    // run the whole reduction pipeline
    reduceOverlap(n, threadsReduction, blocksReduction, maxThreads, maxBlocks, d_inData, d_outData, d_rawSig, &outData, d_index);
    // release all resources
    // (fix: the original leaked d_rawSig and d_index, and released the
    // malloc'd inData with `delete` — undefined behavior)
    cudaFree(d_outData);
    cudaFree(d_inData);
    cudaFree(d_rawSig);
    cudaFree(d_index);
    free(inData);
    return 1.0;
}
// Accumulates the sum of complex products x*x over the N-element field into
// out[*outIdx]: each thread sums a grid-strided subset, the block tree-reduces
// in static shared memory (THREADSPERBLOCK entries; blockDim.x must be a power
// of two and <= THREADSPERBLOCK), then thread 0 of every block adds its
// partial into out[*outIdx] under the Lock. `lock` is the project's Lock type
// — presumably a device spinlock serialising the read-modify-write; confirm
// against its definition. out[*outIdx] must be zero-initialised by the caller.
__global__ void innerProduct(Lock lock, cuDoubleComplex *field, cuDoubleComplex *out, int *outIdx, int N)
{
    __shared__ cuDoubleComplex cache[THREADSPERBLOCK];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIdx=threadIdx.x;
    cuDoubleComplex temp=make_cuDoubleComplex(0.0, 0.0);
    // grid-stride accumulation of the per-element products
    while (tid < N)
    {
        temp=cuCadd(temp, cuCmul(field[tid], field[tid]));
        tid += blockDim.x*gridDim.x;
    }
    // set the cache values
    cache[cacheIdx]=temp;
    // synchronize threads in this block
    __syncthreads();
    // for reductions, threadsPerBlock must be a power of two because of the following code
    int i=blockDim.x/2;
    while (i != 0)
    {
        if (cacheIdx < i)
            cache[cacheIdx] = cuCadd(cache[cacheIdx], cache[cacheIdx+i]);
        __syncthreads();
        i = i/2;
    }
    // one lock-guarded global accumulation per block
    if (cacheIdx==0)
    {
        lock.lock();
        out[*outIdx]=cuCadd(out[*outIdx], cache[0]);
        lock.unlock();
    }
}
// Advances the device-side raw-signal write index by one.
// Intended to be launched <<<1,1>>> between successive innerProduct launches,
// so no atomicity is required.
__global__ void incrementIdx(int *idx)
{
    ++(*idx);
}
// Reduces sum(field[i]*field[i]) over all N samples into the single scalar
// *out (complex square, not magnitude squared). Structure is identical to
// innerProduct except that the destination is a plain pointer instead of an
// indexed slot.
// Launch expectations: blockDim.x == THREADSPERBLOCK (shared cache size) and
// blockDim.x a power of two for the halving reduction.
__global__ void myReduce(Lock lock, cuDoubleComplex *field, cuDoubleComplex *out, int N)
{
// per-block scratch for the tree reduction
__shared__ cuDoubleComplex cache[THREADSPERBLOCK];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIdx=threadIdx.x;
cuDoubleComplex temp=make_cuDoubleComplex(0.0, 0.0);
// square each element (grid-stride loop)
while (tid < N)
{
temp=cuCadd(temp, cuCmul(field[tid], field[tid]));
tid += blockDim.x*gridDim.x;
}
// set the cache values
cache[cacheIdx]=temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of two because of the following code
int i=blockDim.x/2;
while (i != 0)
{
if (cacheIdx < i)
cache[cacheIdx] = cuCadd(cache[cacheIdx], cache[cacheIdx+i]);
__syncthreads();
i = i/2;
}
// block winner adds its partial sum to the global accumulator under the lock
if (cacheIdx==0)
{
lock.lock();
*out=cuCadd(*out, cache[0]);
lock.unlock();
}
}
// Self-test of the lock-based GPU reduction path.
// Fills an n*n field with the constant (10000 + 5000i), reduces
// sum(field[i]*field[i]) on the device via myReduce, and stores the magnitude
// of the sum into a freshly allocated (*ppRawSig)[0].
// Returns true on success; on any CUDA error it reports the failing status
// (the actual cudaError_t of the failing call) and returns false.
bool cu_simConfPointRawSig_wrapperTest(double** ppRawSig, ConfPoint_KernelParams params)
{
    size_t N = params.n*params.n;
    cudaDeviceProp deviceProp;
    cudaError_t error;
    // check device
    error = cudaGetDeviceProperties(&deviceProp, 0);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // host-side test field: a known constant so the expected sum is predictable
    cuDoubleComplex* l_pHostData = (cuDoubleComplex*)malloc(N*sizeof(cuDoubleComplex));
    for (size_t i = 0; i < N; i++)
    {
        l_pHostData[i] = make_cuDoubleComplex(10000.0, 5000.0);
    }
    // device copy of the test field
    cuDoubleComplex* l_pDeviceData;
    error = cudaMalloc((void **)&l_pDeviceData, N * sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned error code " << error << " line: " << __LINE__ << "...\n";
        free(l_pHostData);
        return false;
    }
    // transfer data to GPU
    error = cudaMemcpy(l_pDeviceData, l_pHostData, N*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned error code " << error << " line: " << __LINE__ << "...\n";
        cudaFree(l_pDeviceData);
        free(l_pHostData);
        return false;
    }
    // output scalar: zero-initialised accumulator on host and device
    *ppRawSig = (double*)calloc(1, sizeof(double));
    cuDoubleComplex l_hostOutData = make_cuDoubleComplex(0.0, 0.0);
    cuDoubleComplex* l_pDeviceOutData;
    error = cudaMalloc((void**)&l_pDeviceOutData, sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        cudaFree(l_pDeviceData);
        free(l_pHostData);
        return false;
    }
    error = cudaMemcpy(l_pDeviceOutData, &l_hostOutData, sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        cudaFree(l_pDeviceData);
        cudaFree(l_pDeviceOutData);
        free(l_pHostData);
        return false;
    }
    Lock lock;
    int blocksPerGrid = (int)((N + THREADSPERBLOCK - 1)/THREADSPERBLOCK);
    // NOTE: myReduce's shared cache holds THREADSPERBLOCK entries, so the
    // launch MUST use THREADSPERBLOCK threads per block. The previous
    // hard-coded 1024 read shared memory out of bounds whenever
    // THREADSPERBLOCK != 1024.
    myReduce<<<blocksPerGrid, THREADSPERBLOCK>>>(lock, l_pDeviceData, l_pDeviceOutData, (int)N);
    // copy data back from GPU (this cudaMemcpy also synchronizes on the kernel)
    error = cudaMemcpy(&l_hostOutData, l_pDeviceOutData, sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        cudaFree(l_pDeviceData);
        cudaFree(l_pDeviceOutData);
        free(l_pHostData);
        return false;
    }
    (*ppRawSig)[0] = cuCabs(l_hostOutData);
    // free memory (malloc'd memory must be released with free, not delete)
    cudaFree(l_pDeviceData);
    cudaFree(l_pDeviceOutData);
    free(l_pHostData);
    return true;
}
// -----------------------------------------------------------------------------
// Debug helper: writes one component (real part when realPart is true,
// imaginary part otherwise) of an nx*ny complex field to the file named by
// filenamePattern (a printf pattern with one %i slot) and fileIdx.
// Returns false when the file cannot be opened.
static bool dumpFieldComponent(const char* filenamePattern, unsigned int fileIdx, const cuDoubleComplex* pField, unsigned int nx, unsigned int ny, bool realPart)
{
    char t_filename[512];
    sprintf(t_filename, filenamePattern, fileIdx);
    FILE* hFile = fopen(t_filename, "w");
    if (hFile == NULL)
        return false;
    for (unsigned int iy = 0; iy < ny; iy++)
    {
        for (unsigned int ix = 0; ix < nx; ix++)
        {
            fprintf(hFile, " %.16e;\n", realPart ? pField[ix + iy*nx].x : pField[ix + iy*nx].y);
        }
    }
    fclose(hFile);
    return true;
}

// Debug helper: writes one component of a 1D complex signal of length nz to
// the file named by filenamePattern and fileIdx. Returns false on open failure.
static bool dumpSignalComponent(const char* filenamePattern, unsigned int fileIdx, const cuDoubleComplex* pSig, unsigned int nz, bool realPart)
{
    char t_filename[512];
    sprintf(t_filename, filenamePattern, fileIdx);
    FILE* hFile = fopen(t_filename, "w");
    if (hFile == NULL)
        return false;
    for (unsigned int idz = 0; idz < nz; idz++)
    {
        fprintf(hFile, " %.16e;\n", realPart ? pSig[idz].x : pSig[idz].y);
    }
    fclose(hFile);
    return true;
}

// Simulates the confocal point-sensor signal over an x/y scan.
// For each lateral scan position the pupil field is rebuilt, stepped through
// the z scan by repeated defocusing, propagated to the object plane via a 2D
// FFT, multiplied with the object (thin element approximation) and coherently
// reduced to one complex raw-signal sample per z step. The centre of gravity
// of |rawSig|^2 (over samples above half its maximum) is converted to a
// height value stored in (*ppSensorSig)[jx + jy*scanNumber.x].
// Writes extensive debug dumps to E:\ (helpers above); a dump failure aborts
// the simulation with false (the old code returned 1 == true there).
// Returns true on success, false on any CUDA/cuFFT error.
bool cu_simConfPointSensorSig_wrapper(double** ppSensorSig, ConfPoint_KernelParams params, ConfPoint_KernelObjectParams paramsObject)
{
    cudaDeviceProp deviceProp;
    cudaError_t error;
    // timing accumulators (milliseconds)
    clock_t start, end, startGes, endGes;
    double msecs_DataTransfer=0;
    double msecs_ObjectInteraction=0;
    double msecs_Defoc=0;
    double msecs_FFT=0;
    double msecs_Reduce=0;
    double msecs_createField=0;
    double msecs=0;
    // start timing
    start=clock();
    startGes=clock();
    // check device
    error = cudaGetDeviceProperties(&deviceProp, 0);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // host memory for the sensor signal (one height value per x/y position)
    (*ppSensorSig)=(double*)calloc(params.scanNumber.x*params.scanNumber.y,sizeof(double));
    // host memory for the complex raw signal along z and its squared magnitude
    cuDoubleComplex* l_pRawSig=(cuDoubleComplex*)calloc(params.scanNumber.z,sizeof(cuDoubleComplex));
    double* l_pAbsVal=(double*)calloc(params.scanNumber.z,sizeof(double));
    // device memory for the raw signal
    cuDoubleComplex* d_pRawSig;
    error = cudaMalloc((void**)&d_pRawSig, params.scanNumber.z*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // use a larger block size for Fermi and above
    int block_size = (deviceProp.major < 2) ? 16 : 32;
    // device memory for the scan parameters
    ConfPoint_KernelParams* d_pParams;
    error = cudaMalloc((void**)&d_pParams, sizeof(ConfPoint_KernelParams));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemcpy(d_pParams, &params, sizeof(ConfPoint_KernelParams), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // device memory for the object parameters
    ConfPoint_KernelObjectParams* d_pObjParams;
    error = cudaMalloc((void**)&d_pObjParams, sizeof(ConfPoint_KernelObjectParams));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemcpy(d_pObjParams, &paramsObject, sizeof(ConfPoint_KernelObjectParams), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // device memory for pupil- and object-plane fields
    cuDoubleComplex* d_pPupField;
    error = cudaMalloc((void**)&d_pPupField, params.n*params.n*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    cuDoubleComplex* d_pObjField;
    error = cudaMalloc((void**)&d_pObjField, params.n*params.n*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // one thread per pupil sample; ceil-divide so partial tiles are covered.
    // (The previous remainder branch computed n/(block_size+1) -- an operator
    // precedence bug that under-launched the grid whenever n % block_size != 0.)
    dim3 dimBlock(block_size,block_size,1);
    unsigned int dimGridx = (params.n + block_size - 1)/block_size;
    unsigned int dimGridy = (params.n + block_size - 1)/block_size;
    dim3 dimGrid(std::max(dimGridx,1u),std::max(dimGridy,1u),1); // number of blocks in x,y,z (maximum of 65535 for each dimension)
    cudaDeviceSynchronize();
    end=clock();
    msecs_DataTransfer=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
    start=clock();
    cufftHandle plan;
    if (!myCufftSafeCall(cufftPlan2d(&plan,params.n, params.n, CUFFT_Z2Z)))
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cufftPlan2d returned an error line: " << __LINE__ << "...\n";
        return false;
    }
    // device-side write index into d_pRawSig (incremented after each z step)
    int rawSigIdx=0;
    int *d_pRawSigIdx;
    error = cudaMalloc((void**)&d_pRawSigIdx, sizeof(int));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemcpy(d_pRawSigIdx, &rawSigIdx, sizeof(int), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    cudaDeviceSynchronize();
    end=clock();
    msecs_DataTransfer+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
    Lock lock;
    int blocksPerGrid=(params.n*params.n+THREADSPERBLOCK-1)/THREADSPERBLOCK;
    // host scratch buffer reused for all device-to-host debug copies
    cuDoubleComplex* h_pPupField=(cuDoubleComplex*)malloc(params.n*params.n*sizeof(cuDoubleComplex));
    // do the simulation
    for (unsigned int jy=0; jy<params.scanNumber.y; jy++)
    {
        for (unsigned int jx=0; jx<params.scanNumber.x; jx++)
        {
            start=clock();
            // create pupil field according to aberrations
            createField_kernel<<<dimGrid,dimBlock>>>(d_pPupField, d_pParams);
            cudaDeviceSynchronize();
            end=clock();
            msecs_createField+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
            // reset the raw-signal write index and the raw signal itself
            error = cudaMemcpy(d_pRawSigIdx, &rawSigIdx, sizeof(int), cudaMemcpyHostToDevice);
            if (error != cudaSuccess)
            {
                std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
                return false;
            }
            // zero the device buffer directly (replaces the old host-to-device
            // copy of a calloc'd zero buffer; cudaMemset to 0 is a valid
            // all-zero cuDoubleComplex)
            error = cudaMemset(d_pRawSig, 0, params.scanNumber.z*sizeof(cuDoubleComplex));
            if (error != cudaSuccess)
            {
                std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMemset returned an error " << error << " line: " << __LINE__ << "...\n";
                return false;
            }
            cudaDeviceSynchronize();
            for (unsigned int jz=0; jz<params.scanNumber.z; jz++)
            {
                start=clock();
                // apply defocus for this z step
                defocField_kernel<<<dimGrid,dimBlock>>>(d_pPupField, d_pParams);
                cudaDeviceSynchronize();
                end=clock();
                msecs_Defoc+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
                // debug dump of the defocused pupil field
                if (cudaSuccess != cudaMemcpy(h_pPupField, d_pPupField, params.n*params.n*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost))
                    return false;
                if (!dumpFieldComponent("E:\\pupReal%i.txt", jz, h_pPupField, params.n, params.n, true))
                    return false;
                if (!dumpFieldComponent("E:\\pupImag%i.txt", jz, h_pPupField, params.n, params.n, false))
                    return false;
                start=clock();
                // propagate to the object plane; note that the object field is
                // NOT fftshifted after the call to cufft !!
                if (!myCufftSafeCall(cufftExecZ2Z(plan, (cufftDoubleComplex *)d_pPupField, (cufftDoubleComplex *)d_pObjField, CUFFT_FORWARD)))
                {
                    // try again, just to be sure...
                    if (!myCufftSafeCall(cufftExecZ2Z(plan, (cufftDoubleComplex *)d_pPupField, (cufftDoubleComplex *)d_pObjField, CUFFT_FORWARD)))
                    {
                        cudaFree(d_pParams);
                        cudaFree(d_pPupField);
                        cudaFree(d_pObjField);
                        cudaFree(d_pRawSig);
                        cudaFree(d_pObjParams);
                        cudaFree(d_pRawSigIdx);
                        cufftDestroy (plan);
                        // calloc'd/malloc'd buffers are released with free (not delete)
                        free(l_pRawSig);
                        free(l_pAbsVal);
                        free(h_pPupField);
                        std::cout << "error in cu_simConfPointSensorSig_wrapper: cufftExecZ2Z returned an error line: " << __LINE__ << "...\n";
                        return false;
                    }
                }
                cudaDeviceSynchronize();
                end=clock();
                msecs_FFT+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
                // debug dump of the object-plane field before the interaction
                if (cudaSuccess != cudaMemcpy(h_pPupField, d_pObjField, params.n*params.n*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost))
                    return false;
                if (!dumpFieldComponent("E:\\objInReal%i.txt", jz, h_pPupField, params.n, params.n, true))
                    return false;
                if (!dumpFieldComponent("E:\\objInImag%i.txt", jz, h_pPupField, params.n, params.n, false))
                    return false;
                start=clock();
                // do the object interaction in TEA (thin element approximation)
                objectInteractionTEA<<<dimGrid,dimBlock>>>(d_pObjField, d_pParams, d_pObjParams, jx);
                cudaDeviceSynchronize();
                end=clock();
                msecs_ObjectInteraction+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
                // debug dump of the object-plane field after the interaction
                if (cudaSuccess != cudaMemcpy(h_pPupField, d_pObjField, params.n*params.n*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost))
                    return false;
                if (!dumpFieldComponent("E:\\objOutReal%i.txt", jz, h_pPupField, params.n, params.n, true))
                    return false;
                if (!dumpFieldComponent("E:\\objOutImag%i.txt", jz, h_pPupField, params.n, params.n, false))
                    return false;
                start=clock();
                // coherent sum over the object field -> one raw-signal sample
                innerProduct<<<blocksPerGrid, THREADSPERBLOCK>>>(lock, d_pObjField, d_pRawSig, d_pRawSigIdx, params.n*params.n);
                incrementIdx<<<1,1>>>(d_pRawSigIdx);
                cudaDeviceSynchronize();
                end=clock();
                msecs_Reduce+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
            }
            start=clock();
            // copy the complex raw signal of this lateral position back
            error = cudaMemcpy(l_pRawSig, d_pRawSig, params.scanNumber.z*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
            if (error != cudaSuccess)
            {
                std::cout << "error in cu_simConfPointSensorSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
                return false;
            }
            cudaDeviceSynchronize();
            end=clock();
            msecs_DataTransfer+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
            if (!dumpSignalComponent("E:\\rawSigReal%i.txt", jx, l_pRawSig, params.scanNumber.z, true))
                return false;
            if (!dumpSignalComponent("E:\\rawSigImag%i.txt", jx, l_pRawSig, params.scanNumber.z, false))
                return false;
            // find the maximum of |rawSig|^2 along z
            double sigMax=0;
            for (unsigned int idx=0; idx<params.scanNumber.z; idx++)
            {
                l_pAbsVal[idx]=pow(cuCabs(l_pRawSig[idx]),2);
                sigMax=(sigMax > l_pAbsVal[idx]) ? sigMax : l_pAbsVal[idx];
            }
            // centre of gravity over all samples above half the maximum
            double nom=0;
            double denom=0;
            for (unsigned int idx=0; idx<params.scanNumber.z; idx++)
            {
                if (l_pAbsVal[idx] > sigMax/2)
                {
                    nom+=double(idx)*l_pAbsVal[idx];
                    denom+=l_pAbsVal[idx];
                }
            }
            double x=jx*params.scanStep.x;
            double z0=paramsObject.A*cos(paramsObject.kN*x);
            // row-major layout: the row stride is scanNumber.x (the previous
            // code multiplied by scanNumber.y, which is only correct for
            // square scans)
            (*ppSensorSig)[jx+jy*params.scanNumber.x]=nom/denom*params.scanStep.z-params.scanStep.z*params.scanNumber.z/2+z0;
        }
    }
    std::cout << msecs_DataTransfer << "msec for data transfer between CPU and GPU" << "\n";
    std::cout << msecs_FFT << "msec for fft" << "\n";
    std::cout << msecs_Defoc << "msec for defocus kernel" << "\n";
    std::cout << msecs_createField << "msec to create the field" << "\n";
    std::cout << msecs_Reduce << "msec to calculate the reduction" << "\n";
    std::cout << msecs_ObjectInteraction << "msec for calculating object interaction" << "\n";
    // end timing
    endGes=clock();
    msecs=((endGes-startGes)/(double)CLOCKS_PER_SEC*1000.0);
    std::cout << msecs <<" ms to simulate confocal raw signal"<< "...\n";
    // release every resource exactly once (the previous code freed
    // d_pObjParams twice and leaked d_pRawSig, h_pPupField and l_pAbsVal)
    cudaFree(d_pObjParams);
    cudaFree(d_pParams);
    cudaFree(d_pPupField);
    cudaFree(d_pObjField);
    cudaFree(d_pRawSig);
    cudaFree(d_pRawSigIdx);
    cufftDestroy (plan);
    free(l_pRawSig);
    free(l_pAbsVal);
    free(h_pPupField);
    return true;
}
// Simulates the complex confocal raw signal for every scan position
// (x, y, z) and returns |rawSig|^2 in the freshly allocated (*ppRawSig)
// (length scanNumber.x*scanNumber.y*scanNumber.z).
// For each z step the pupil field is defocused, FFT-propagated to the object
// plane and coherently summed by the lock-based innerProduct kernel into
// consecutive slots of the device raw-signal buffer.
// Prints per-stage timings. Returns true on success, false on any
// CUDA/cuFFT error.
bool cu_simConfPointRawSig_wrapper(double** ppRawSig, ConfPoint_KernelParams params)
{
    cudaDeviceProp deviceProp;
    cudaError_t error;
    // timing accumulators (milliseconds)
    clock_t start, end, startGes, endGes;
    double msecs_DataTransfer=0;
    double msecs_DataTransfer1=0;
    double msecs_Defoc=0;
    double msecs_FFT=0;
    double msecs_Reduce=0;
    double msecs_createField=0;
    double msecs=0;
    // start timing
    start=clock();
    startGes=clock();
    // check device
    error = cudaGetDeviceProperties(&deviceProp, 0);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // host memory for |rawSig|^2 and the complex raw signal
    *ppRawSig=(double*)calloc(params.scanNumber.x*params.scanNumber.y*params.scanNumber.z,sizeof(double));
    cuDoubleComplex* l_pRawSig=(cuDoubleComplex*)calloc(params.scanNumber.x*params.scanNumber.y*params.scanNumber.z,sizeof(cuDoubleComplex));
    // device memory for the raw signal, zero-initialised directly on the
    // device (replaces the old host-to-device copy of a zeroed buffer)
    cuDoubleComplex* d_pRawSig;
    error = cudaMalloc((void**)&d_pRawSig, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemset(d_pRawSig, 0, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemset returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // use a larger block size for Fermi and above
    int block_size = (deviceProp.major < 2) ? 16 : 32;
    // device memory for the scan parameters
    ConfPoint_KernelParams* d_pParams;
    error = cudaMalloc((void**)&d_pParams, sizeof(ConfPoint_KernelParams));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemcpy(d_pParams, &params, sizeof(ConfPoint_KernelParams), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // device memory for pupil- and object-plane fields
    cuDoubleComplex* d_pPupField;
    error = cudaMalloc((void**)&d_pPupField, params.n*params.n*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    cuDoubleComplex* d_pObjField;
    error = cudaMalloc((void**)&d_pObjField, params.n*params.n*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // one thread per pupil sample; ceil-divide so partial tiles are covered.
    // (The previous remainder branch computed n/(block_size+1) -- an operator
    // precedence bug that under-launched the grid whenever n % block_size != 0.)
    dim3 dimBlock(block_size,block_size,1);
    unsigned int dimGridx = (params.n + block_size - 1)/block_size;
    unsigned int dimGridy = (params.n + block_size - 1)/block_size;
    dim3 dimGrid(std::max(dimGridx,1u),std::max(dimGridy,1u),1); // number of blocks in x,y,z (maximum of 65535 for each dimension)
    cudaDeviceSynchronize();
    end=clock();
    msecs_DataTransfer=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
    start=clock();
    // create pupil field according to aberrations
    createField_kernel<<<dimGrid,dimBlock>>>(d_pPupField, d_pParams);
    cudaDeviceSynchronize();
    end=clock();
    msecs_createField=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
    start=clock();
    cufftHandle plan;
    if (!myCufftSafeCall(cufftPlan2d(&plan,params.n, params.n, CUFFT_Z2Z)))
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cufftPlan2d returned an error line: " << __LINE__ << "...\n";
        return false;
    }
    // device-side write index into d_pRawSig (incremented after each z step)
    int rawSigIdx=0;
    int *d_pRawSigIdx;
    error = cudaMalloc((void**)&d_pRawSigIdx, sizeof(int));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemcpy(d_pRawSigIdx, &rawSigIdx, sizeof(int), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    cudaDeviceSynchronize();
    end=clock();
    msecs_DataTransfer+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
    Lock lock;
    int blocksPerGrid=(params.n*params.n+THREADSPERBLOCK-1)/THREADSPERBLOCK;
    // do the simulation
    for (unsigned int jy=0; jy<params.scanNumber.y; jy++)
    {
        for (unsigned int jx=0; jx<params.scanNumber.x; jx++)
        {
            for (unsigned int jz=0; jz<params.scanNumber.z; jz++)
            {
                start=clock();
                // apply defocus for this z step
                defocField_kernel<<<dimGrid,dimBlock>>>(d_pPupField, d_pParams);
                cudaDeviceSynchronize();
                end=clock();
                msecs_Defoc+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
                start=clock();
                // note that object field is not fftshifted after call to cufft !!
                if (!myCufftSafeCall(cufftExecZ2Z(plan, (cufftDoubleComplex *)d_pPupField, (cufftDoubleComplex *)d_pObjField, CUFFT_FORWARD)))
                {
                    // try again, just to be sure...
                    if (!myCufftSafeCall(cufftExecZ2Z(plan, (cufftDoubleComplex *)d_pPupField, (cufftDoubleComplex *)d_pObjField, CUFFT_FORWARD)))
                    {
                        cudaFree(d_pParams);
                        cudaFree(d_pPupField);
                        cudaFree(d_pObjField);
                        cudaFree(d_pRawSig);
                        cudaFree(d_pRawSigIdx);
                        cufftDestroy (plan);
                        // calloc'd buffer is released with free (not delete)
                        free(l_pRawSig);
                        std::cout << "error in cu_simConfPointRawSig_wrapper: cufftExecZ2Z returned an error line: " << __LINE__ << "...\n";
                        return false;
                    }
                }
                cudaDeviceSynchronize();
                end=clock();
                msecs_FFT+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
                start=clock();
                // coherent sum over the object field -> one raw-signal sample
                innerProduct<<<blocksPerGrid, THREADSPERBLOCK>>>(lock, d_pObjField, d_pRawSig, d_pRawSigIdx, params.n*params.n);
                incrementIdx<<<1,1>>>(d_pRawSigIdx);
                cudaDeviceSynchronize();
                end=clock();
                msecs_Reduce+=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
            }
        }
    }
    clock_t start1=clock();
    // copy data back from GPU
    error = cudaMemcpy(l_pRawSig, d_pRawSig, params.scanNumber.x*params.scanNumber.y*params.scanNumber.z*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    cudaDeviceSynchronize();
    clock_t end1=clock();
    msecs_DataTransfer1=((end1-start1)/(double)CLOCKS_PER_SEC*1000.0);
    std::cout << msecs_DataTransfer << "msec for data transfer between CPU and GPU" << "\n";
    std::cout << msecs_DataTransfer1 << "msec for data transfer1 between CPU and GPU" << "\n";
    std::cout << msecs_FFT << "msec for fft" << "\n";
    std::cout << msecs_Defoc << "msec for defocus kernel" << "\n";
    std::cout << msecs_createField << "msec to create the field" << "\n";
    std::cout << msecs_Reduce << "msec to calculate the reduction" << "\n";
    // calc magnitude square
    for (unsigned int idx=0; idx<params.scanNumber.x*params.scanNumber.y*params.scanNumber.z; idx++)
    {
        (*ppRawSig)[idx]=pow(cuCabs(l_pRawSig[idx]),2);
    }
    // end timing
    endGes=clock();
    msecs=((endGes-startGes)/(double)CLOCKS_PER_SEC*1000.0);
    std::cout << msecs <<" ms to simulate confocal raw signal"<< "...\n";
    cudaFree(d_pParams);
    cudaFree(d_pPupField);
    cudaFree(d_pObjField);
    cudaFree(d_pRawSig);
    cudaFree(d_pRawSigIdx);
    cufftDestroy (plan);
    free(l_pRawSig);
    return true;
}
// Variant of cu_simConfPointRawSig_wrapper that performs the per-z-step
// summation with the multi-pass reduceOverlap() reduction instead of the
// lock-based innerProduct kernel.
// Fills the freshly allocated (*ppRawSig) with one double per scan position
// (the previous version allocated the output buffer but never copied the
// result into it). Returns true on success, false on any CUDA/cuFFT error.
bool cu_simConfPointRawSig_wrapper1(double** ppRawSig, ConfPoint_KernelParams params)
{
    cudaDeviceProp deviceProp;
    cudaError_t error;
    // check device
    error = cudaGetDeviceProperties(&deviceProp, 0);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaGetDeviceProperties returned error code " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // use a larger block size for Fermi and above
    int block_size = (deviceProp.major < 2) ? 16 : 32;
    // device memory for the scan parameters
    ConfPoint_KernelParams* d_pParams;
    error = cudaMalloc((void**)&d_pParams, sizeof(ConfPoint_KernelParams));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemcpy(d_pParams, &params, sizeof(ConfPoint_KernelParams), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // device memory for pupil- and object-plane fields
    cuDoubleComplex* d_pPupField;
    error = cudaMalloc((void**)&d_pPupField, params.n*params.n*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    cuDoubleComplex* d_pObjField;
    error = cudaMalloc((void**)&d_pObjField, params.n*params.n*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // one thread per pupil sample; ceil-divide so partial tiles are covered
    dim3 dimBlock(block_size,block_size,1);
    unsigned int dimGridx = (params.n + block_size - 1)/block_size;
    unsigned int dimGridy = (params.n + block_size - 1)/block_size;
    dim3 dimGrid(std::max(dimGridx,1u),std::max(dimGridy,1u),1); // number of blocks in x,y,z (maximum of 65535 for each dimension)
    // create pupil field according to aberrations
    createField_kernel<<<dimGrid,dimBlock>>>(d_pPupField, d_pParams);
    cufftHandle plan;
    if (!myCufftSafeCall(cufftPlan2d(&plan,params.n, params.n, CUFFT_Z2Z)))
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cufftPlan2d returned an error line: " << __LINE__ << "...\n";
        return false;
    }
    // calc dimensions of kernel launch for reduction
    int blocksReduction;
    int threadsReduction;
    int maxBlocks=1024; // why this number??
    int maxThreads=deviceProp.maxThreadsPerBlock;
    getNumBlocksAndThreads(3, params.n*params.n, maxBlocks, maxThreads, blocksReduction, threadsReduction);
    // scratch buffer for the per-block partial sums of the reduction
    cuDoubleComplex *d_pOutData;
    error = cudaMalloc((void**)&d_pOutData, blocksReduction*sizeof(cuDoubleComplex));
    if (error != cudaSuccess)
    {
        // NOTE: the old code returned 100 here, which converts to true and
        // falsely signalled success to the caller
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // host and device memory for the raw signal (one double per scan position)
    size_t numSamples = (size_t)params.scanNumber.x*params.scanNumber.y*params.scanNumber.z;
    double *h_pRawSig=(double*)malloc(numSamples*sizeof(double));
    *ppRawSig=(double*)malloc(numSamples*sizeof(double));
    double* d_pRawSig;
    error = cudaMalloc((void**)&d_pRawSig, numSamples*sizeof(double));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    // device-side write index into the raw signal (advanced by reduceOverlap --
    // presumably; defined elsewhere, TODO confirm)
    unsigned int *d_index;
    unsigned int index=0;
    error = cudaMalloc((void**)&d_index, sizeof(unsigned int));
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMalloc returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    error = cudaMemcpy(d_index, &index, sizeof(unsigned int), cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        std::cout << "error in cu_simConfPointRawSig_wrapper: cudaMemcpy returned an error " << error << " line: " << __LINE__ << "...\n";
        return false;
    }
    clock_t start, end;
    double msecs=0;
    // start timing
    start=clock();
    // do the simulation
    for (unsigned int jy=0; jy<params.scanNumber.y; jy++)
    {
        for (unsigned int jx=0; jx<params.scanNumber.x; jx++)
        {
            for (unsigned int jz=0; jz<params.scanNumber.z; jz++)
            {
                // apply defocus for this z step
                defocField_kernel<<<dimGrid,dimBlock>>>(d_pPupField, d_pParams);
                // note that object field is not fftshifted after call to cufft !!
                if (!myCufftSafeCall(cufftExecZ2Z(plan, (cufftDoubleComplex *)d_pPupField, (cufftDoubleComplex *)d_pObjField, CUFFT_FORWARD)))
                {
                    cudaFree(d_pParams);
                    cudaFree(d_pPupField);
                    cudaFree(d_pObjField);
                    cudaFree(d_pRawSig);
                    cudaFree(d_pOutData);
                    cudaFree(d_index);
                    cufftDestroy (plan);
                    free(h_pRawSig);
                    std::cout << "error in cu_simConfPointRawSig_wrapper: cufftExecZ2Z returned an error line: " << __LINE__ << "...\n";
                    return false;
                }
                // do the summation (multi-pass reduction into d_pRawSig[*d_index])
                reduceOverlap(params.n*params.n, threadsReduction, blocksReduction, maxThreads, maxBlocks, d_pObjField, d_pOutData, d_pRawSig, h_pRawSig, d_index);
            }
        }
    }
    cudaMemcpy(h_pRawSig, d_pRawSig, numSamples*sizeof(double), cudaMemcpyDeviceToHost);
    // deliver the result to the caller
    for (size_t idx=0; idx<numSamples; idx++)
    {
        (*ppRawSig)[idx]=h_pRawSig[idx];
    }
    // end timing
    end=clock();
    msecs=((end-start)/(double)CLOCKS_PER_SEC*1000.0);
    std::cout << msecs <<" ms to simulate confocal raw signal"<< "...\n";
    // release every resource (d_pOutData, d_index and h_pRawSig leaked before)
    cudaFree(d_pParams);
    cudaFree(d_pPupField);
    cudaFree(d_pObjField);
    cudaFree(d_pRawSig);
    cudaFree(d_pOutData);
    cudaFree(d_index);
    cufftDestroy (plan);
    free(h_pRawSig);
    return true;
}
9340eceae64a2f64f48f87ed47fabd9c378020c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates an approach to the image segmentation
* trees construction. It is based on Boruvka's MST algorithm.
* Here's the complete list of references:
* 1) V. Vineet et al, "Fast Minimum Spanning Tree for
* Large Graphs on the GPU";
* 2) P. Felzenszwalb et al, "Efficient Graph-Based Image Segmentation";
* 3) A. Ion et al, "Considerations Regarding the Minimum Spanning
* Tree Pyramid Segmentation Method".
*/
// System includes.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// STL includes.
#include <iostream>
#include <fstream>
#include <iterator>
#include <vector>
#include <list>
#include <deque>
#include <algorithm>
// Thrust library includes.
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/for_each.h>
#include <thrust/reduce.h>
#include <thrust/unique.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/adjacent_difference.h>
#include <thrust/find.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
// Sample framework includes.
#include <helper_functions.h>
#include <helper_cuda.h>
// Project includes.
#include "common_hip.cuh"
// Kernels.
#include "kernels_hip.cuh"
using std::cin;
using std::cout;
using std::endl;
using std::vector;
using std::list;
using std::deque;
// Deterministic pseudo-random generator (von Neumann middle-square).
// Used instead of rand(), whose sequence differs across OS platforms and
// would make testing and the dumped output inconsistent.
int myrand(void)
{
    static int seed = 72191;
    char digits[22];

    // Square the seed, render it zero-padded to 10 characters, then keep
    // the middle five digits as both the next seed and the return value.
    seed *= seed;
    sprintf(digits, "%010d", seed);

    digits[8] = '\0';           // cut the string after character index 7
    seed = atoi(digits + 3);    // characters 3..7 -> middle five digits

    return seed;
}
// Simple memory pool class. It is nothing more than an array of fixed-sized
// arrays ("chunks") carved out of one contiguous device allocation.
// The user is responsible for returning each requested chunk via put();
// get() on an exhausted pool is undefined (the free list would be empty).
template <typename T>
class DeviceMemoryPool
{
    public:
        // The parameters of the constructor are as follows:
        // 1) uint chunkSize --- size (in elements of T) of each array;
        // 2) uint chunksCount --- number of fixed-sized arrays.
        DeviceMemoryPool(uint chunkSize, uint chunksCount) :
            chunkSize_(chunkSize)
        {
            // Round each chunk up to a multiple of 512 bytes so every chunk
            // starts on a well-aligned device address.
            chunkRawSize_ = (chunkSize * sizeof(T) + 511) & ~511;

            try
            {
                basePtr_ =
                    thrust::device_malloc(chunkRawSize_ * chunksCount);
            }
            catch (thrust::system_error &e)
            {
                cout << "Pool memory allocation failed (" << e.what() << ")"
                     << endl;
                exit(EXIT_FAILURE);
            }

            for (uint chunkIndex = 0; chunkIndex < chunksCount; ++chunkIndex)
            {
                chunks_.push_back(
                    thrust::device_ptr<T>(
                        reinterpret_cast<T *>(
                            static_cast<char *>(basePtr_.get()) +
                            chunkRawSize_ * chunkIndex)));
            }
        }

        ~DeviceMemoryPool()
        {
            try
            {
                thrust::device_free(basePtr_);
            }
            catch (thrust::system_error &e)
            {
                // Bug fix: this path frees memory, so report it as a
                // deallocation failure (the old message said "allocation").
                cout << "Pool memory deallocation failed (" << e.what() << ")"
                     << endl;
                exit(EXIT_FAILURE);
            }
        }

        // Returns an address of the first available array
        // in the memory pool.
        thrust::device_ptr<T> get()
        {
            thrust::device_ptr<T> ptr(chunks_.back());
            chunks_.pop_back();

            return ptr;
        }

        // Pushes an address stored in "ptr" to the list
        // of available arrays of the memory pool.
        // It should be noted that it is the user who is responsible for
        // returning the previously requested memory to the appropriate pool.
        inline void put(const thrust::device_ptr<T> &ptr)
        {
            chunks_.push_back(ptr);
        }

        uint totalFreeChunks() const
        {
            // Explicit cast: list::size() is size_t, interface returns uint.
            return static_cast<uint>(chunks_.size());
        }

    private:
        uint chunkSize_, chunkRawSize_;    // elements per chunk / padded bytes
        thrust::device_ptr<void> basePtr_; // single backing device allocation
        list< thrust::device_ptr<T> > chunks_; // free list of chunk starts
};
// Graph structure.
// Adjacency is stored CSR-style: "vertices" holds per-vertex offsets into
// the parallel "edges"/"weights" arrays. Each undirected image edge is
// stored twice (once per direction) by buildGraph().
struct Graph
{
    Graph() {}

    // Pre-sizes the arrays for a graph with the given vertex/edge counts.
    Graph(uint verticesCount, uint edgesCount) :
        vertices(verticesCount),
        edges(edgesCount),
        weights(edgesCount)
    {}

    // This vector stores offsets for each vertex in "edges" and "weights"
    // vectors. For example:
    // "vertices[0]" is an index of the first outgoing edge of vertex #0,
    // "vertices[1]" is an index of the first outgoing edge of vertex #1, etc.
    vector<uint> vertices;

    // This vector stores indices of endpoints of the corresponding edges.
    // For example, "edges[vertices[0]]" is the first neighbouring vertex
    // of vertex #0.
    vector<uint> edges;

    // This vector stores weights of the corresponding edges.
    vector<float> weights;
};
// Simple segmentation tree class.
// Each level of the tree corresponds to one segmentation of the image;
// levels are appended coarse-to-fine by the builder and rendered by dump().
// See "Level" class for the details.
class Pyramid
{
    public:
        // Records a new (coarser) level built from device-resident data.
        // totalSuperNodes --- number of segments at the new level;
        // totalNodes      --- number of nodes at the previous level.
        void addLevel(uint totalSuperNodes,
                      uint totalNodes,
                      thrust::device_ptr<uint> superVerticesOffsets,
                      thrust::device_ptr<uint> verticesIDs)
        {
            levels_.push_back(Level(totalSuperNodes, totalNodes));
            levels_.back().buildFromDeviceData(superVerticesOffsets,
                                               verticesIDs);
        }

        uint levelsCount() const
        {
            return static_cast<uint>(levels_.size());
        }

        // Writes one PPM image per level ("level_00.ppm", ...), iterating
        // from the last-added (coarsest) level backwards.
        void dump(uint width, uint height) const
        {
            char filename[256], format[256];

            uint levelIndex = 0;

            // Enough digits to number all levels with leading zeros.
            uint requiredDigitsCount =
                static_cast<uint>(log10(static_cast<float>(levelsCount()))) +
                1;
            sprintf(format, "level_%%0%uu.ppm", requiredDigitsCount);

            for (LevelsIterator level = levels_.rbegin();
                 level != levels_.rend();
                 ++level, ++levelIndex)
            {
                sprintf(filename, format, levelIndex);
                dumpLevel(level, width, height, filename);
            }
        }

    private:
        // Level of the segmentation tree.
        class Level
        {
            public:
                Level(uint totalSuperNodes, uint totalNodes) :
                    superNodesOffsets_(totalSuperNodes), nodes_(totalNodes)
                {
                }

                // Copies the offsets/IDs arrays from device to host memory.
                void buildFromDeviceData(
                    thrust::device_ptr<uint> superVerticesOffsets,
                    thrust::device_ptr<uint> verticesIDs)
                {
                    checkCudaErrors(
                        hipMemcpy(&(superNodesOffsets_[0]),
                                  superVerticesOffsets.get(),
                                  sizeof(uint) * superNodesOffsets_.size(),
                                  hipMemcpyDeviceToHost));
                    checkCudaErrors(
                        hipMemcpy(&(nodes_[0]),
                                  verticesIDs.get(),
                                  sizeof(uint) * nodes_.size(),
                                  hipMemcpyDeviceToHost));
                }

            private:
                friend class Pyramid;

                // The pair of the following vectors describes the
                // relation between the consecutive levels.
                // Consider an example. Let the index of the current level be n.
                // Then nodes of level #(n-1) with indices stored in
                // "nodes[superNodesOffsets_[0]]",
                // "nodes[superNodesOffsets_[0] + 1]",
                // ...,
                // "nodes[superNodesOffsets_[1] - 1]"
                // correspond to vertex #0 of level #n. And so on.
                vector<uint> superNodesOffsets_;
                vector<uint> nodes_;
        };

        typedef list<Level>::const_reverse_iterator LevelsIterator;

        // Dumps level to the file "level_n.ppm" where n
        // is the index of the level. Segments are drawn in random colors.
        void dumpLevel(LevelsIterator level,
                       uint width,
                       uint height,
                       const char *filename) const
        {
            // NOTE(review): levelIndex is computed but not used below.
            uint levelIndex =
                static_cast<uint>(std::distance(levels_.rbegin(), level));

            // Queue of (node ID at finer level, segment ID) pairs used to
            // propagate segment labels down to the pixel level.
            deque< std::pair<uint, uint> > nodesQueue;
            uint totalSegments;

            // Seed the queue from the requested level: each super-node's
            // children are labelled with that super-node's index.
            {
                const vector<uint> &superNodesOffsets =
                    level->superNodesOffsets_;
                const vector<uint> &nodes =
                    level->nodes_;

                totalSegments = static_cast<uint>(superNodesOffsets.size());

                for (uint superNodeIndex = 0, nodeIndex = 0;
                     superNodeIndex < superNodesOffsets.size();
                     ++superNodeIndex)
                {
                    // Last group extends to the end of "nodes".
                    uint superNodeEnd =
                        superNodeIndex + 1 < superNodesOffsets.size() ?
                        superNodesOffsets[superNodeIndex + 1] :
                        static_cast<uint>(nodes.size());

                    for (; nodeIndex < superNodeEnd; ++nodeIndex)
                    {
                        nodesQueue.push_back(std::make_pair(nodes[nodeIndex],
                                                            superNodeIndex));
                    }
                }
            }

            ++level;

            // Walk the remaining (finer) levels, carrying each node's
            // segment ID down to its children.
            while (level != levels_.rend())
            {
                uint superNodesCount = static_cast<uint>(nodesQueue.size());

                const vector<uint> &superNodesOffsets =
                    level->superNodesOffsets_;
                const vector<uint> &nodes =
                    level->nodes_;

                while (superNodesCount--)
                {
                    std::pair<uint, uint> currentNode = nodesQueue.front();
                    nodesQueue.pop_front();

                    uint superNodeBegin = superNodesOffsets[currentNode.first];
                    uint superNodeEnd =
                        currentNode.first + 1 < superNodesOffsets.size() ?
                        superNodesOffsets[currentNode.first + 1] :
                        static_cast<uint>(nodes.size());

                    for (uint nodeIndex = superNodeBegin;
                         nodeIndex < superNodeEnd;
                         ++nodeIndex)
                    {
                        nodesQueue.push_back(
                            std::make_pair(nodes[nodeIndex],
                                           currentNode.second));
                    }
                }

                ++level;
            }

            // Assign a pseudo-random RGB color to each segment.
            vector<uint> colors(3 * totalSegments);

            for (uint colorIndex = 0; colorIndex < totalSegments; ++colorIndex)
            {
                colors[colorIndex * 3    ] = myrand() % 256;
                colors[colorIndex * 3 + 1] = myrand() % 256;
                colors[colorIndex * 3 + 2] = myrand() % 256;
            }

            uchar *image = new uchar[width * height * 3];

            // By now the queue holds (pixel index, segment ID) pairs.
            while (!nodesQueue.empty())
            {
                std::pair<uint, uint> currentNode = nodesQueue.front();
                nodesQueue.pop_front();

                uint pixelIndex = currentNode.first;
                uint pixelSegment = currentNode.second;

                image[pixelIndex * 3    ] = colors[pixelSegment * 3    ];
                image[pixelIndex * 3 + 1] = colors[pixelSegment * 3 + 1];
                image[pixelIndex * 3 + 2] = colors[pixelSegment * 3 + 2];
            }

            __savePPM(filename, image, width, height, 3);

            delete[] image;
        }

        list<Level> levels_;
};
// The class that encapsulates the main algorithm (Boruvka-style MST
// contraction; see references [1]-[3] in the file header).
class SegmentationTreeBuilder
{
    public:
        SegmentationTreeBuilder() {}
        ~SegmentationTreeBuilder() {}

        // Repeatedly invokes the step of the algorithm
        // until the limiting segmentation is found.
        // Returns time (in ms) spent on building the tree.
        float run(const Graph &graph, Pyramid &segmentations)
        {
            hipEvent_t start, stop;
            hipEventCreate(&start);
            hipEventCreate(&stop);

            hipEventRecord(start, 0);

            // Allocate required memory pools. We need just 4 types of arrays.
            MemoryPoolsCollection pools =
            {
                DeviceMemoryPool<uint>(
                    static_cast<uint>(graph.vertices.size()),
                    kUintVerticesPoolsRequired),
                DeviceMemoryPool<float>(
                    static_cast<uint>(graph.vertices.size()),
                    kFloatVerticesPoolsRequired),
                DeviceMemoryPool<uint>(
                    static_cast<uint>(graph.edges.size()),
                    kUintEdgesPoolsRequired),
                DeviceMemoryPool<float>(
                    static_cast<uint>(graph.edges.size()),
                    kFloatEdgesPoolsRequired)
            };

            // Initialize internal variables
            try
            {
                initalizeData(graph, pools);
            }
            catch (thrust::system_error &e)
            {
                cout << "Initialization failed (" << e.what() << ")" << endl;
                exit(EXIT_FAILURE);
            }

            // Run steps
            AlgorithmStatus status;

            try
            {
                do
                {
                    status = invokeStep(pools, segmentations);
                }
                while (status != ALGORITHM_FINISHED);
            }
            catch (thrust::system_error &e)
            {
                cout << "Algorithm failed (" << e.what() << ")" << endl;
                exit(EXIT_FAILURE);
            }

            hipEventRecord(stop, 0);
            hipEventSynchronize(stop);

            float elapsedTime;
            hipEventElapsedTime(&elapsedTime, start, stop);

            return elapsedTime;
        }

    private:
        // Debug helper: prints the device memory budget to stdout.
        void printMemoryUsage()
        {
            size_t availableMemory, totalMemory, usedMemory;

            hipMemGetInfo(&availableMemory, &totalMemory);
            usedMemory = totalMemory - availableMemory;

            cout << "Device memory: used " << usedMemory
                 << " available " << availableMemory
                 << " total " << totalMemory << endl;
        }

        struct MemoryPoolsCollection
        {
            DeviceMemoryPool<uint> uintVertices;
            DeviceMemoryPool<float> floatVertices;
            DeviceMemoryPool<uint> uintEdges;
            DeviceMemoryPool<float> floatEdges;
        };

        // Peak number of simultaneously-live temporary arrays of each kind,
        // used to size the corresponding pools.
        static const uint kUintVerticesPoolsRequired = 8;
        static const uint kFloatVerticesPoolsRequired = 3;
        static const uint kUintEdgesPoolsRequired = 8;
        static const uint kFloatEdgesPoolsRequired = 4;

        // Copies the host graph into pool-backed device arrays.
        // (Name kept as-is --- "initalize" [sic] --- to match call sites.)
        void initalizeData(const Graph &graph, MemoryPoolsCollection &pools)
        {
            // Get memory for the internal variables
            verticesCount_ = static_cast<uint>(graph.vertices.size());
            edgesCount_ = static_cast<uint>(graph.edges.size());

            dVertices_ = pools.uintVertices.get();
            dEdges_ = pools.uintEdges.get();
            dWeights_ = pools.floatEdges.get();
            dOutputEdgesFlags_ = pools.uintEdges.get();

            // Copy graph to the device memory
            checkCudaErrors(hipMemcpy(dVertices_.get(),
                                      &(graph.vertices[0]),
                                      sizeof(uint) * verticesCount_,
                                      hipMemcpyHostToDevice));
            checkCudaErrors(hipMemcpy(dEdges_.get(),
                                      &(graph.edges[0]),
                                      sizeof(uint) * edgesCount_,
                                      hipMemcpyHostToDevice));
            checkCudaErrors(hipMemcpy(dWeights_.get(),
                                      &(graph.weights[0]),
                                      sizeof(float) * edgesCount_,
                                      hipMemcpyHostToDevice));

            thrust::fill(dOutputEdgesFlags_,
                         dOutputEdgesFlags_ + edgesCount_,
                         0);
        }

        static const uint kMaxThreadsPerBlock = 256;

        // Calculates grid parameters of the consecutive kernel calls
        // based on the number of elements in the array.
        void calculateThreadsDistribution(uint totalElements,
                                          uint &blocksCount,
                                          uint &threadsPerBlockCount)
        {
            if (totalElements > kMaxThreadsPerBlock)
            {
                // Ceiling division so the last partial block is included.
                blocksCount =
                    (totalElements + kMaxThreadsPerBlock - 1) /
                    kMaxThreadsPerBlock;

                threadsPerBlockCount = kMaxThreadsPerBlock;
            }
            else
            {
                blocksCount = 1;
                threadsPerBlockCount = totalElements;
            }
        }

        enum AlgorithmStatus { ALGORITHM_NOT_FINISHED, ALGORITHM_FINISHED };

        // One Boruvka contraction step: finds each vertex's minimum outgoing
        // edge, contracts the resulting components, records the new level in
        // "segmentations" and rebuilds the reduced graph in place.
        AlgorithmStatus invokeStep(MemoryPoolsCollection &pools,
                                   Pyramid &segmentations)
        {
            uint blocksCount, threadsPerBlockCount;

            calculateThreadsDistribution(edgesCount_,
                                         blocksCount,
                                         threadsPerBlockCount);

            dim3 gridDimsForEdges(blocksCount, 1, 1);
            dim3 blockDimsForEdges(threadsPerBlockCount, 1, 1);

            calculateThreadsDistribution(verticesCount_,
                                         blocksCount,
                                         threadsPerBlockCount);

            dim3 gridDimsForVertices(blocksCount, 1, 1);
            dim3 blockDimsForVertices(threadsPerBlockCount, 1, 1);

            thrust::device_ptr<uint> dEdgesFlags = pools.uintEdges.get();

            thrust::fill(dEdgesFlags, dEdgesFlags + edgesCount_, 0);

            // Mark the first edge for each vertex in "dEdgesFlags"
            hipLaunchKernelGGL(( markSegments), dim3(gridDimsForVertices), dim3(blockDimsForVertices), 0 , 0,
                dVertices_.get(), dEdgesFlags.get(), verticesCount_);
            getLastCudaError("markSegments launch failed.");

            // Now find minimum edges for each vertex.
            thrust::device_ptr<uint> dMinScannedEdges =
                pools.uintEdges.get();
            thrust::device_ptr<float> dMinScannedWeights =
                pools.floatEdges.get();

            thrust::inclusive_scan_by_key(
                dEdgesFlags,
                dEdgesFlags + edgesCount_,
                thrust::make_zip_iterator(
                    thrust::make_tuple(dWeights_, dEdges_)),
                thrust::make_zip_iterator(
                    thrust::make_tuple(dMinScannedWeights, dMinScannedEdges)),
                thrust::greater_equal<uint>(),
                thrust::minimum< thrust::tuple<float, uint> >());

            // To make things clear.
            // Let "dEdgesFlags" denote groups of edges that
            // correspond to the same vertices. Then the last edge of each group
            // (in "dMinScannedEdges" and "dMinScannedWeights") is now minimal.

            // Calculate a successor vertex for each vertex. A successor of the
            // vertex v is a neighbouring vertex connected to v
            // by the minimal edge.
            thrust::device_ptr<uint> dSuccessors = pools.uintVertices.get();

            hipLaunchKernelGGL(( getSuccessors), dim3(gridDimsForVertices), dim3(blockDimsForVertices), 0 , 0,
                dVertices_.get(),
                dMinScannedEdges.get(),
                dSuccessors.get(),
                verticesCount_,
                edgesCount_);
            getLastCudaError("getSuccessors launch failed.");

            pools.uintEdges.put(dMinScannedEdges);
            pools.floatEdges.put(dMinScannedWeights);

            // Remove cyclic successor dependencies. Note that there can be only
            // two vertices in a cycle. See [1] for details.
            hipLaunchKernelGGL(( removeCycles), dim3(gridDimsForVertices), dim3(blockDimsForVertices), 0 , 0,
                dSuccessors.get(), verticesCount_);
            getLastCudaError("removeCycles launch failed.");

            // Build up an array of startpoints for edges. As already stated,
            // each group of edges denoted by "dEdgesFlags"
            // has the same startpoint.
            thrust::device_ptr<uint> dStartpoints = pools.uintEdges.get();

            thrust::inclusive_scan(dEdgesFlags,
                                   dEdgesFlags + edgesCount_,
                                   dStartpoints);

            // Scan yields 1-based group numbers; shift them to 0-based IDs.
            hipLaunchKernelGGL(( addScalar), dim3(gridDimsForEdges), dim3(blockDimsForEdges), 0 , 0,
                dStartpoints.get(), -1, edgesCount_);
            getLastCudaError("addScalar launch failed.");

            // Shrink the chains of successors. New successors will eventually
            // represent superpixels of the new level.
            thrust::device_ptr<uint> dRepresentatives =
                pools.uintVertices.get();

            hipLaunchKernelGGL(( getRepresentatives), dim3(gridDimsForVertices), dim3(blockDimsForVertices), 0 , 0,
                dSuccessors.get(),
                dRepresentatives.get(),
                verticesCount_);
            getLastCudaError("getRepresentatives launch failed.");

            swap(dSuccessors, dRepresentatives);

            pools.uintVertices.put(dRepresentatives);

            // Group vertices by successors' indices.
            thrust::device_ptr<uint> dClusteredVerticesIDs =
                pools.uintVertices.get();

            thrust::sequence(dClusteredVerticesIDs,
                             dClusteredVerticesIDs + verticesCount_);

            thrust::sort(
                thrust::make_zip_iterator(
                    thrust::make_tuple(
                        thrust::device_ptr<uint> (dSuccessors),
                        thrust::device_ptr<uint> (dClusteredVerticesIDs))),
                thrust::make_zip_iterator(
                    thrust::make_tuple(
                        thrust::device_ptr<uint> (dSuccessors +
                                                  verticesCount_),
                        thrust::device_ptr<uint> (dClusteredVerticesIDs +
                                                  verticesCount_))));

            // Mark those groups.
            thrust::device_ptr<uint> dVerticesFlags_ = pools.uintVertices.get();

            thrust::fill(dVerticesFlags_, dVerticesFlags_ + verticesCount_, 0);

            thrust::adjacent_difference(dSuccessors,
                                        dSuccessors + verticesCount_,
                                        dVerticesFlags_,
                                        thrust::not_equal_to<uint>());

            // adjacent_difference copies element #0 through unchanged;
            // force the first flag to zero.
            hipMemset((void *) dVerticesFlags_.get(), 0, sizeof(uint));

            // Assign new indices to the successors (the indices of vertices
            // at the new level).
            thrust::device_ptr<uint> dNewVerticesIDs_ =
                pools.uintVertices.get();

            thrust::inclusive_scan(dVerticesFlags_,
                                   dVerticesFlags_ + verticesCount_,
                                   dNewVerticesIDs_);

            pools.uintVertices.put(dVerticesFlags_);

            // Now we can calculate number of resulting superpixels easily.
            uint newVerticesCount;
            hipMemcpy(&newVerticesCount,
                      (dNewVerticesIDs_ + verticesCount_ - 1).get(),
                      sizeof(uint),
                      hipMemcpyDeviceToHost);
            ++newVerticesCount;

            // There are two special cases when we can stop our algorithm:
            // 1) number of vertices in the graph remained unchanged;
            // 2) only one vertex remains.
            if (newVerticesCount == verticesCount_)
            {
                return ALGORITHM_FINISHED;
            }
            else if (newVerticesCount == 1)
            {
                thrust::device_ptr<uint> dDummyVerticesOffsets =
                    pools.uintVertices.get();

                hipMemset((void *) dDummyVerticesOffsets.get(),
                          0,
                          sizeof(uint));

                thrust::device_ptr<uint> dDummyVerticesIDs =
                    pools.uintVertices.get();

                thrust::sequence(dDummyVerticesIDs,
                                 dDummyVerticesIDs + verticesCount_);

                segmentations.addLevel(1,
                                       verticesCount_,
                                       dDummyVerticesOffsets,
                                       dDummyVerticesIDs);

                return ALGORITHM_FINISHED;
            }

            // Calculate how old vertices IDs map to new vertices IDs.
            thrust::device_ptr<uint> dVerticesMapping =
                pools.uintVertices.get();

            hipLaunchKernelGGL(( getVerticesMapping), dim3(gridDimsForVertices), dim3(blockDimsForVertices), 0 , 0,
                dClusteredVerticesIDs.get(),
                dNewVerticesIDs_.get(),
                dVerticesMapping.get(),
                verticesCount_);
            getLastCudaError("getVerticesMapping launch failed.");

            pools.uintVertices.put(dNewVerticesIDs_);
            pools.uintVertices.put(dClusteredVerticesIDs);
            pools.uintVertices.put(dSuccessors);

            // Invalidate self-loops in the reduced graph (the graph
            // produced by merging all old vertices that have
            // the same successor).
            hipLaunchKernelGGL(( invalidateLoops), dim3(gridDimsForEdges), dim3(blockDimsForEdges), 0 , 0,
                dStartpoints.get(),
                dVerticesMapping.get(),
                dEdges_.get(),
                edgesCount_);
            getLastCudaError("invalidateLoops launch failed.");

            // Calculate various information about the surviving
            // (new startpoints IDs and IDs of edges) and
            // non-surviving/contracted edges (their weights).
            thrust::device_ptr<uint> dNewStartpoints = pools.uintEdges.get();
            thrust::device_ptr<uint> dSurvivedEdgesIDs = pools.uintEdges.get();

            hipLaunchKernelGGL(( calculateEdgesInfo), dim3(gridDimsForEdges), dim3(blockDimsForEdges), 0 , 0,
                dStartpoints.get(),
                dVerticesMapping.get(),
                dEdges_.get(),
                dWeights_.get(),
                dNewStartpoints.get(),
                dSurvivedEdgesIDs.get(),
                edgesCount_,
                newVerticesCount);
            getLastCudaError("calculateEdgesInfo launch failed.");

            pools.uintEdges.put(dStartpoints);

            // Group that information by the new startpoints IDs.
            // Keep in mind that we want to build new (reduced) graph and apply
            // the step of the algorithm to that one. Hence we need to
            // preserve the structure of the original graph: neighbours and
            // weights should be grouped by vertex.
            thrust::sort(
                thrust::make_zip_iterator(
                    thrust::make_tuple(dNewStartpoints,
                                       dSurvivedEdgesIDs)),
                thrust::make_zip_iterator(
                    thrust::make_tuple(dNewStartpoints + edgesCount_,
                                       dSurvivedEdgesIDs + edgesCount_)));

            // Find the group of contracted edges (sorted to the tail with
            // startpoint IDs >= newVerticesCount).
            uint *invalidEdgesPtr =
                thrust::find_if(
                    dNewStartpoints,
                    dNewStartpoints + edgesCount_,
                    IsGreaterEqualThan<uint>(newVerticesCount)).get();

            // Calculate how many edges there are in the reduced graph.
            uint validEdgesCount =
                static_cast<uint>(invalidEdgesPtr - dNewStartpoints.get());

            // Mark groups of edges corresponding to the same vertex in the
            // reduced graph.
            thrust::adjacent_difference(dNewStartpoints,
                                        dNewStartpoints + edgesCount_,
                                        dEdgesFlags,
                                        thrust::not_equal_to<uint>());

            // Byte-wise memset: first flag becomes exactly 1 (first byte 1,
            // remaining bytes zeroed by the preceding memset).
            hipMemset((void *) dEdgesFlags.get(), 0, sizeof(uint));
            hipMemset((void *) dEdgesFlags.get(), 1, 1);

            pools.uintEdges.put(dNewStartpoints);

            // Now we are able to build the reduced graph. See "Graph"
            // class for the details on the graph's internal structure.

            // Calculate vertices' offsets for the reduced graph.
            uint *verticesEndPtr =
                thrust::copy_if(thrust::make_counting_iterator(0U),
                                thrust::make_counting_iterator(validEdgesCount),
                                dEdgesFlags,
                                dVertices_,
                                thrust::identity<uint>()).get();

            pools.uintEdges.put(dEdgesFlags);

            // Build up a neighbourhood for each vertex in the reduced graph
            // (this includes recalculating edges' weights).
            calculateThreadsDistribution(validEdgesCount,
                                         blocksCount,
                                         threadsPerBlockCount);

            dim3 newGridDimsForEdges(blocksCount, 1, 1);
            dim3 newBlockDimsForEdges(threadsPerBlockCount, 1, 1);

            thrust::device_ptr<uint> dNewEdges = pools.uintEdges.get();
            thrust::device_ptr<float> dNewWeights = pools.floatEdges.get();

            hipLaunchKernelGGL(( makeNewEdges), dim3(newGridDimsForEdges), dim3(newBlockDimsForEdges), 0 , 0,
                dSurvivedEdgesIDs.get(),
                dVerticesMapping.get(),
                dEdges_.get(),
                dWeights_.get(),
                dNewEdges.get(),
                dNewWeights.get(),
                validEdgesCount);
            getLastCudaError("makeNewEdges launch failed.");

            swap(dEdges_, dNewEdges);
            swap(dWeights_, dNewWeights);

            pools.uintEdges.put(dNewEdges);
            pools.floatEdges.put(dNewWeights);
            pools.uintEdges.put(dSurvivedEdgesIDs);

            // The graph's reconstruction is now finished.

            // Build new level of the segmentation tree. It is a trivial task
            // as we already have "dVerticesMapping" that contains all
            // sufficient information about the vertices' transformations.
            thrust::device_ptr<uint> dVerticesIDs =
                pools.uintVertices.get();
            thrust::device_ptr<uint> dNewVerticesOffsets =
                pools.uintVertices.get();

            thrust::sequence(dVerticesIDs, dVerticesIDs + verticesCount_);
            thrust::sort_by_key(dVerticesMapping,
                                dVerticesMapping + verticesCount_,
                                dVerticesIDs);
            thrust::unique_by_key_copy(dVerticesMapping,
                                       dVerticesMapping + verticesCount_,
                                       thrust::make_counting_iterator(0),
                                       thrust::make_discard_iterator(),
                                       dNewVerticesOffsets);

            segmentations.addLevel(newVerticesCount,
                                   verticesCount_,
                                   dNewVerticesOffsets,
                                   dVerticesIDs);

            pools.uintVertices.put(dVerticesIDs);
            pools.uintVertices.put(dNewVerticesOffsets);
            pools.uintVertices.put(dVerticesMapping);

            // We can now safely set new counts for vertices and edges.
            verticesCount_ = newVerticesCount;
            edgesCount_ = validEdgesCount;

            return ALGORITHM_NOT_FINISHED;
        }

        // Current (reduced) graph dimensions; updated after every step.
        uint verticesCount_;
        uint edgesCount_;

        // Device-side CSR representation of the current graph.
        thrust::device_ptr<uint> dVertices_;
        thrust::device_ptr<uint> dEdges_;
        thrust::device_ptr<float> dWeights_;
        thrust::device_ptr<uint> dOutputEdgesFlags_;
};
// Loads a PPM image located via the sample-framework search path.
// On success fills "data" (one uchar3 per pixel), "width" and "height",
// and returns 0; returns -1 if the file cannot be found or parsed.
int loadImage(const char *filename,
              const char *executablePath,
              vector<uchar3> &data,
              uint &width,
              uint &height)
{
    const char *imagePath = sdkFindFilePath(filename, executablePath);

    if (imagePath == NULL)
    {
        return -1;
    }

    uchar *rawPixels = NULL;
    unsigned int channels;

    if (!__loadPPM(imagePath, &rawPixels, &width, &height, &channels))
    {
        return -1;
    }

    // Reinterpret the raw byte buffer as packed RGB triplets and copy it
    // into the output vector, then release the loader's buffer.
    uchar3 *pixels = reinterpret_cast<uchar3 *>(rawPixels);
    data.assign(pixels, pixels + width * height);

    free(reinterpret_cast<void *>(rawPixels));

    return 0;
}
// Euclidean distance between two RGB pixels; used as the edge weight
// in the pixel graph. Maximum possible value is sqrt(3 * 255^2) ~ 441.7.
inline float distance(const uchar3 &first, const uchar3 &second)
{
    int dx = static_cast<int>(first.x) - static_cast<int>(second.x);
    int dy = static_cast<int>(first.y) - static_cast<int>(second.y);
    int dz = static_cast<int>(first.z) - static_cast<int>(second.z);

    uint sqrResult = dx * dx + dy * dy + dz * dz;

    // sqrtf keeps the computation in single precision; plain sqrt() on the
    // C math.h path promotes through double for no benefit here.
    return sqrtf(static_cast<float>(sqrResult));
}
// Builds a net-graph for the image with 4-connected pixels.
// Each pixel becomes a vertex; every pair of 4-adjacent pixels is joined by
// two directed edges (one per direction) weighted by their RGB distance.
void buildGraph(const vector<uchar3> &image,
                uint width,
                uint height,
                Graph &graph)
{
    uint totalNodes = static_cast<uint>(image.size());
    // 4 directed edges per pixel, minus the ones that would cross the
    // image border: 2 * width (top+bottom rows) and 2 * height (side columns).
    uint totalEdges = 4 * totalNodes - 2 * (width + height);

    graph.vertices.resize(totalNodes);
    graph.edges.reserve(totalEdges);
    // Bug fix: this previously called graph.weights.reserve(graph.edges.size()),
    // but edges.size() is still 0 at this point (reserve() does not change
    // size()), so "weights" grew with repeated reallocations.
    graph.weights.reserve(totalEdges);

    uint edgesProcessed = 0;

    for (uint y = 0; y < height; ++y)
    {
        for (uint x = 0; x < width; ++x)
        {
            uint nodeIndex = y * width + x;
            const uchar3 &centerPixel = image[nodeIndex];

            // CSR offset: index of this vertex's first outgoing edge.
            graph.vertices[nodeIndex] = edgesProcessed;

            if (y > 0)
            {
                uint lowerNodeIndex = (y - 1) * width + x;
                const uchar3 &lowerPixel = image[lowerNodeIndex];

                graph.edges.push_back(lowerNodeIndex);
                graph.weights.push_back(distance(centerPixel, lowerPixel));
                ++edgesProcessed;
            }

            if (y + 1 < height)
            {
                uint upperNodeIndex = (y + 1) * width + x;
                const uchar3 &upperPixel = image[upperNodeIndex];

                graph.edges.push_back(upperNodeIndex);
                graph.weights.push_back(distance(centerPixel, upperPixel));
                ++edgesProcessed;
            }

            if (x > 0)
            {
                uint leftNodeIndex = y * width + x - 1;
                const uchar3 &leftPixel = image[leftNodeIndex];

                graph.edges.push_back(leftNodeIndex);
                graph.weights.push_back(distance(centerPixel, leftPixel));
                ++edgesProcessed;
            }

            if (x + 1 < width)
            {
                uint rightNodeIndex = y * width + x + 1;
                const uchar3 &rightPixel = image[rightNodeIndex];

                graph.edges.push_back(rightNodeIndex);
                graph.weights.push_back(distance(centerPixel, rightPixel));
                ++edgesProcessed;
            }
        }
    }
}
// Default input image; can be overridden with the "-file=<name>" flag.
static char *kDefaultImageName = "test.ppm";

int main(int argc, char **argv)
{
    vector<uchar3> image;
    uint imageWidth, imageHeight;
    char *imageName;

    printf("%s Starting...\n\n", argv[0]);

    imageName = kDefaultImageName;

    // Optional command-line override of the input file name.
    if (checkCmdLineFlag(argc, (const char **) argv, "file"))
    {
        getCmdLineArgumentString(argc,
                                 (const char **) argv,
                                 "file",
                                 &imageName);
    }

    if (loadImage(imageName, argv[0], image, imageWidth, imageHeight) != 0)
    {
        printf("Failed to open <%s>, program exit...\n", imageName);
        exit(EXIT_FAILURE);
    }

    int devID = findCudaDevice(argc, (const char **)argv);

    hipDeviceProp_t deviceProp;
    checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));

    // This requires a minimum of SM 1.3 to be able to run
    if ((deviceProp.major > 1) ||
        (deviceProp.major == 1 && deviceProp.minor >= 3))
    {
        // Build the pixel graph, run the MST-based builder, then dump
        // every level of the resulting segmentation tree as PPM files.
        Graph graph;
        buildGraph(image, imageWidth, imageHeight, graph);

        Pyramid segmentations;

        cout << "* Building segmentation tree... ";
        cout.flush();

        SegmentationTreeBuilder algo;
        float elapsedTime = algo.run(graph, segmentations);

        cout << "done in " << elapsedTime << " (ms)" << endl;

        cout << "* Dumping levels for each tree..." << endl << endl;
        segmentations.dump(imageWidth, imageHeight);

        // Compare two of the dumped levels against shipped references;
        // exit status reflects whether both match within tolerance.
        bool bResults[2];

        bResults[0] = sdkComparePPM("level_00.ppm",
                                    sdkFindFilePath("ref_00.ppm", argv[0]),
                                    5.0f,
                                    0.15f,
                                    false);
        bResults[1] = sdkComparePPM("level_09.ppm",
                                    sdkFindFilePath("ref_09.ppm", argv[0]),
                                    5.0f,
                                    0.15f,
                                    false);

        hipDeviceReset();
        exit((bResults[0] && bResults[1]) ? EXIT_SUCCESS : EXIT_FAILURE);
    }
    else
    {
        printf("segmentationTreeThrust requires a GPU with Compute Capability "
               "1.3 or higher, exiting...\n\n");

        hipDeviceReset();
        exit(EXIT_SUCCESS);
    }
}
| 9340eceae64a2f64f48f87ed47fabd9c378020c4.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates an approach to the image segmentation
* trees construction. It is based on Boruvka's MST algorithm.
* Here's the complete list of references:
* 1) V. Vineet et al, "Fast Minimum Spanning Tree for
* Large Graphs on the GPU";
* 2) P. Felzenszwalb et al, "Efficient Graph-Based Image Segmentation";
* 3) A. Ion et al, "Considerations Regarding the Minimum Spanning
* Tree Pyramid Segmentation Method".
*/
// System includes.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// STL includes.
#include <iostream>
#include <fstream>
#include <iterator>
#include <vector>
#include <list>
#include <deque>
#include <algorithm>
// Thrust library includes.
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/for_each.h>
#include <thrust/reduce.h>
#include <thrust/unique.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/adjacent_difference.h>
#include <thrust/find.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
// Sample framework includes.
#include <helper_functions.h>
#include <helper_cuda.h>
// Project includes.
#include "common.cuh"
// Kernels.
#include "kernels.cuh"
using std::cin;
using std::cout;
using std::endl;
using std::vector;
using std::list;
using std::deque;
// Deterministic pseudo-random generator (von Neumann middle-square).
// Used instead of rand(), whose sequence differs across OS platforms and
// would make testing and the dumped output inconsistent.
int myrand(void)
{
    static int seed = 72191;
    char digits[22];

    // Square the seed, render it zero-padded to 10 characters, then keep
    // the middle five digits as both the next seed and the return value.
    seed *= seed;
    sprintf(digits, "%010d", seed);

    digits[8] = '\0';           // cut the string after character index 7
    seed = atoi(digits + 3);    // characters 3..7 -> middle five digits

    return seed;
}
// Simple memory pool class. It is nothing more than an array of fixed-sized
// arrays ("chunks") carved out of one contiguous device allocation.
// The user is responsible for returning each requested chunk via put();
// get() on an exhausted pool is undefined (the free list would be empty).
template <typename T>
class DeviceMemoryPool
{
    public:
        // The parameters of the constructor are as follows:
        // 1) uint chunkSize --- size (in elements of T) of each array;
        // 2) uint chunksCount --- number of fixed-sized arrays.
        DeviceMemoryPool(uint chunkSize, uint chunksCount) :
            chunkSize_(chunkSize)
        {
            // Round each chunk up to a multiple of 512 bytes so every chunk
            // starts on a well-aligned device address.
            chunkRawSize_ = (chunkSize * sizeof(T) + 511) & ~511;

            try
            {
                basePtr_ =
                    thrust::device_malloc(chunkRawSize_ * chunksCount);
            }
            catch (thrust::system_error &e)
            {
                cout << "Pool memory allocation failed (" << e.what() << ")"
                     << endl;
                exit(EXIT_FAILURE);
            }

            for (uint chunkIndex = 0; chunkIndex < chunksCount; ++chunkIndex)
            {
                chunks_.push_back(
                    thrust::device_ptr<T>(
                        reinterpret_cast<T *>(
                            static_cast<char *>(basePtr_.get()) +
                            chunkRawSize_ * chunkIndex)));
            }
        }

        ~DeviceMemoryPool()
        {
            try
            {
                thrust::device_free(basePtr_);
            }
            catch (thrust::system_error &e)
            {
                // Bug fix: this path frees memory, so report it as a
                // deallocation failure (the old message said "allocation").
                cout << "Pool memory deallocation failed (" << e.what() << ")"
                     << endl;
                exit(EXIT_FAILURE);
            }
        }

        // Returns an address of the first available array
        // in the memory pool.
        thrust::device_ptr<T> get()
        {
            thrust::device_ptr<T> ptr(chunks_.back());
            chunks_.pop_back();

            return ptr;
        }

        // Pushes an address stored in "ptr" to the list
        // of available arrays of the memory pool.
        // It should be noted that it is the user who is responsible for
        // returning the previously requested memory to the appropriate pool.
        inline void put(const thrust::device_ptr<T> &ptr)
        {
            chunks_.push_back(ptr);
        }

        uint totalFreeChunks() const
        {
            // Explicit cast: list::size() is size_t, interface returns uint.
            return static_cast<uint>(chunks_.size());
        }

    private:
        uint chunkSize_, chunkRawSize_;    // elements per chunk / padded bytes
        thrust::device_ptr<void> basePtr_; // single backing device allocation
        list< thrust::device_ptr<T> > chunks_; // free list of chunk starts
};
// Graph structure.
// Adjacency is stored CSR-style: "vertices" holds per-vertex offsets into
// the parallel "edges"/"weights" arrays. Each undirected image edge is
// stored twice (once per direction) by buildGraph().
struct Graph
{
    Graph() {}

    // Pre-sizes the arrays for a graph with the given vertex/edge counts.
    Graph(uint verticesCount, uint edgesCount) :
        vertices(verticesCount),
        edges(edgesCount),
        weights(edgesCount)
    {}

    // This vector stores offsets for each vertex in "edges" and "weights"
    // vectors. For example:
    // "vertices[0]" is an index of the first outgoing edge of vertex #0,
    // "vertices[1]" is an index of the first outgoing edge of vertex #1, etc.
    vector<uint> vertices;

    // This vector stores indices of endpoints of the corresponding edges.
    // For example, "edges[vertices[0]]" is the first neighbouring vertex
    // of vertex #0.
    vector<uint> edges;

    // This vector stores weights of the corresponding edges.
    vector<float> weights;
};
// Simple segmentation tree class.
// Each level of the tree corresponds to the segmentation.
// See "Level" class for the details. Levels are appended from the finest
// segmentation to the coarsest one.
class Pyramid
{
    public:
        // Appends one segmentation level, copying its description from
        // device memory (offsets + node IDs) to host vectors.
        void addLevel(uint totalSuperNodes,
                      uint totalNodes,
                      thrust::device_ptr<uint> superVerticesOffsets,
                      thrust::device_ptr<uint> verticesIDs)
        {
            levels_.push_back(Level(totalSuperNodes, totalNodes));
            levels_.back().buildFromDeviceData(superVerticesOffsets,
                                               verticesIDs);
        }

        uint levelsCount() const
        {
            return static_cast<uint>(levels_.size());
        }

        // Writes one PPM image per level (coarsest level first, as
        // "level_00.ppm"), with segments drawn in random colors.
        void dump(uint width, uint height) const
        {
            char filename[256], format[256];
            uint levelIndex = 0;
            // Zero-pad the level index so filenames sort correctly.
            uint requiredDigitsCount =
                static_cast<uint>(log10(static_cast<float>(levelsCount()))) +
                1;
            sprintf(format, "level_%%0%uu.ppm", requiredDigitsCount);
            for (LevelsIterator level = levels_.rbegin();
                 level != levels_.rend();
                 ++level, ++levelIndex)
            {
                sprintf(filename, format, levelIndex);
                dumpLevel(level, width, height, filename);
            }
        }

    private:
        // Level of the segmentation tree.
        class Level
        {
            public:
                Level(uint totalSuperNodes, uint totalNodes) :
                    superNodesOffsets_(totalSuperNodes), nodes_(totalNodes)
                {
                }

                // Copies the level description from device to host.
                void buildFromDeviceData(
                    thrust::device_ptr<uint> superVerticesOffsets,
                    thrust::device_ptr<uint> verticesIDs)
                {
                    checkCudaErrors(
                        cudaMemcpy(&(superNodesOffsets_[0]),
                                   superVerticesOffsets.get(),
                                   sizeof(uint) * superNodesOffsets_.size(),
                                   cudaMemcpyDeviceToHost));
                    checkCudaErrors(
                        cudaMemcpy(&(nodes_[0]),
                                   verticesIDs.get(),
                                   sizeof(uint) * nodes_.size(),
                                   cudaMemcpyDeviceToHost));
                }

            private:
                friend class Pyramid;

                // The pair of the following vectors describes the
                // relation between the consecutive levels.
                // Consider an example. Let the index of the current level be n.
                // Then nodes of level #(n-1) with indices stored in
                // "nodes[superNodesOffsets_[0]]",
                // "nodes[superNodesOffsets_[0] + 1]",
                // ...,
                // "nodes[superNodesOffsets_[1] - 1]"
                // correspond to vertex #0 of level #n. An so on.
                vector<uint> superNodesOffsets_;
                vector<uint> nodes_;
        };

        typedef list<Level>::const_reverse_iterator LevelsIterator;

        // Dumps level to the file "level_n.ppm" where n
        // is index of the level. Segments are drawn in random colors.
        void dumpLevel(LevelsIterator level,
                       uint width,
                       uint height,
                       const char *filename) const
        {
            uint levelIndex =
                static_cast<uint>(std::distance(levels_.rbegin(), level));
            deque< std::pair<uint, uint> > nodesQueue;
            uint totalSegments;
            // Seed the queue with (node, segment) pairs for this level.
            {
                const vector<uint> &superNodesOffsets =
                    level->superNodesOffsets_;
                const vector<uint> &nodes =
                    level->nodes_;
                totalSegments = static_cast<uint>(superNodesOffsets.size());
                for (uint superNodeIndex = 0, nodeIndex = 0;
                     superNodeIndex < superNodesOffsets.size();
                     ++superNodeIndex)
                {
                    uint superNodeEnd =
                        superNodeIndex + 1 < superNodesOffsets.size() ?
                        superNodesOffsets[superNodeIndex + 1] :
                        static_cast<uint>(nodes.size());
                    for (; nodeIndex < superNodeEnd; ++nodeIndex)
                    {
                        nodesQueue.push_back(std::make_pair(nodes[nodeIndex],
                                                            superNodeIndex));
                    }
                }
            }
            // Propagate segment labels down through the finer levels until
            // the queue holds (pixel, segment) pairs of the deepest level.
            ++level;
            while (level != levels_.rend())
            {
                uint superNodesCount = static_cast<uint>(nodesQueue.size());
                const vector<uint> &superNodesOffsets =
                    level->superNodesOffsets_;
                const vector<uint> &nodes =
                    level->nodes_;
                while (superNodesCount--)
                {
                    std::pair<uint, uint> currentNode = nodesQueue.front();
                    nodesQueue.pop_front();
                    uint superNodeBegin = superNodesOffsets[currentNode.first];
                    uint superNodeEnd =
                        currentNode.first + 1 < superNodesOffsets.size() ?
                        superNodesOffsets[currentNode.first + 1] :
                        static_cast<uint>(nodes.size());
                    for (uint nodeIndex = superNodeBegin;
                         nodeIndex < superNodeEnd;
                         ++nodeIndex)
                    {
                        nodesQueue.push_back(
                            std::make_pair(nodes[nodeIndex],
                                           currentNode.second));
                    }
                }
                ++level;
            }
            // Pick a random RGB color per segment.
            vector<uint> colors(3 * totalSegments);
            for (uint colorIndex = 0; colorIndex < totalSegments; ++colorIndex)
            {
                colors[colorIndex * 3    ] = myrand() % 256;
                colors[colorIndex * 3 + 1] = myrand() % 256;
                colors[colorIndex * 3 + 2] = myrand() % 256;
            }
            // Paint each pixel with its segment's color and save the image.
            uchar *image = new uchar[width * height * 3];
            while (!nodesQueue.empty())
            {
                std::pair<uint, uint> currentNode = nodesQueue.front();
                nodesQueue.pop_front();
                uint pixelIndex = currentNode.first;
                uint pixelSegment = currentNode.second;
                image[pixelIndex * 3    ] = colors[pixelSegment * 3    ];
                image[pixelIndex * 3 + 1] = colors[pixelSegment * 3 + 1];
                image[pixelIndex * 3 + 2] = colors[pixelSegment * 3 + 2];
            }
            __savePPM(filename, image, width, height, 3);
            delete[] image;
        }

        list<Level> levels_;
};
// The class that encapsulates the main algorithm.
// Usage: construct, then call run() with a graph; the hierarchy of
// segmentations is accumulated in the supplied Pyramid.
class SegmentationTreeBuilder
{
    public:
        SegmentationTreeBuilder() {}
        ~SegmentationTreeBuilder() {}

        // Repeatedly invokes the step of the algorithm
        // until the limiting segmentation is found.
        // Returns time (in ms) spent on building the tree.
        float run(const Graph &graph, Pyramid &segmentations)
        {
            cudaEvent_t start, stop;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start, 0);

            // Allocate required memory pools. We need just 4 types of arrays.
            MemoryPoolsCollection pools =
            {
                DeviceMemoryPool<uint>(
                    static_cast<uint>(graph.vertices.size()),
                    kUintVerticesPoolsRequired),
                DeviceMemoryPool<float>(
                    static_cast<uint>(graph.vertices.size()),
                    kFloatVerticesPoolsRequired),
                DeviceMemoryPool<uint>(
                    static_cast<uint>(graph.edges.size()),
                    kUintEdgesPoolsRequired),
                DeviceMemoryPool<float>(
                    static_cast<uint>(graph.edges.size()),
                    kFloatEdgesPoolsRequired)
            };

            // Initialize internal variables
            try
            {
                initializeData(graph, pools);
            }
            catch (thrust::system_error &e)
            {
                cout << "Initialization failed (" << e.what() << ")" << endl;
                exit(EXIT_FAILURE);
            }

            // Run steps
            AlgorithmStatus status;
            try
            {
                do
                {
                    status = invokeStep(pools, segmentations);
                }
                while (status != ALGORITHM_FINISHED);
            }
            catch (thrust::system_error &e)
            {
                cout << "Algorithm failed (" << e.what() << ")" << endl;
                exit(EXIT_FAILURE);
            }

            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);

            float elapsedTime;
            cudaEventElapsedTime(&elapsedTime, start, stop);

            // Bug fix: destroy the timing events; previously they leaked
            // on every call to run().
            cudaEventDestroy(start);
            cudaEventDestroy(stop);

            return elapsedTime;
        }

    private:
        // Prints current device memory usage (debugging helper).
        void printMemoryUsage()
        {
            size_t availableMemory, totalMemory, usedMemory;
            cudaMemGetInfo(&availableMemory, &totalMemory);
            usedMemory = totalMemory - availableMemory;
            cout << "Device memory: used " << usedMemory
                 << " available " << availableMemory
                 << " total " << totalMemory << endl;
        }

        struct MemoryPoolsCollection
        {
            DeviceMemoryPool<uint> uintVertices;
            DeviceMemoryPool<float> floatVertices;
            DeviceMemoryPool<uint> uintEdges;
            DeviceMemoryPool<float> floatEdges;
        };

        // Upper bounds on how many arrays of each kind a single step needs.
        static const uint kUintVerticesPoolsRequired = 8;
        static const uint kFloatVerticesPoolsRequired = 3;
        static const uint kUintEdgesPoolsRequired = 8;
        static const uint kFloatEdgesPoolsRequired = 4;

        // Copies the graph to device memory and prepares auxiliary arrays.
        // (Renamed from "initalizeData" — typo; private, only called above.)
        void initializeData(const Graph &graph, MemoryPoolsCollection &pools)
        {
            // Get memory for the internal variables
            verticesCount_ = static_cast<uint>(graph.vertices.size());
            edgesCount_ = static_cast<uint>(graph.edges.size());

            dVertices_ = pools.uintVertices.get();
            dEdges_ = pools.uintEdges.get();
            dWeights_ = pools.floatEdges.get();
            dOutputEdgesFlags_ = pools.uintEdges.get();

            // Copy graph to the device memory
            checkCudaErrors(cudaMemcpy(dVertices_.get(),
                                       &(graph.vertices[0]),
                                       sizeof(uint) * verticesCount_,
                                       cudaMemcpyHostToDevice));
            checkCudaErrors(cudaMemcpy(dEdges_.get(),
                                       &(graph.edges[0]),
                                       sizeof(uint) * edgesCount_,
                                       cudaMemcpyHostToDevice));
            checkCudaErrors(cudaMemcpy(dWeights_.get(),
                                       &(graph.weights[0]),
                                       sizeof(float) * edgesCount_,
                                       cudaMemcpyHostToDevice));

            thrust::fill(dOutputEdgesFlags_,
                         dOutputEdgesFlags_ + edgesCount_,
                         0);
        }

        static const uint kMaxThreadsPerBlock = 256;

        // Calculates grid parameters of the consecutive kernel calls
        // based on the number of elements in the array.
        void calculateThreadsDistribution(uint totalElements,
                                          uint &blocksCount,
                                          uint &threadsPerBlockCount)
        {
            if (totalElements > kMaxThreadsPerBlock)
            {
                blocksCount =
                    (totalElements + kMaxThreadsPerBlock - 1) /
                    kMaxThreadsPerBlock;
                threadsPerBlockCount = kMaxThreadsPerBlock;
            }
            else
            {
                blocksCount = 1;
                threadsPerBlockCount = totalElements;
            }
        }

        enum AlgorithmStatus { ALGORITHM_NOT_FINISHED, ALGORITHM_FINISHED };

        // Performs one contraction step: merges every vertex with its
        // minimum-weight neighbour, rebuilds the reduced graph and records
        // the mapping as a new level of the segmentation pyramid.
        AlgorithmStatus invokeStep(MemoryPoolsCollection &pools,
                                   Pyramid &segmentations)
        {
            uint blocksCount, threadsPerBlockCount;

            calculateThreadsDistribution(edgesCount_,
                                         blocksCount,
                                         threadsPerBlockCount);
            dim3 gridDimsForEdges(blocksCount, 1, 1);
            dim3 blockDimsForEdges(threadsPerBlockCount, 1, 1);

            calculateThreadsDistribution(verticesCount_,
                                         blocksCount,
                                         threadsPerBlockCount);
            dim3 gridDimsForVertices(blocksCount, 1, 1);
            dim3 blockDimsForVertices(threadsPerBlockCount, 1, 1);

            thrust::device_ptr<uint> dEdgesFlags = pools.uintEdges.get();
            thrust::fill(dEdgesFlags, dEdgesFlags + edgesCount_, 0);

            // Mark the first edge for each vertex in "dEdgesFlags"
            markSegments<<< gridDimsForVertices, blockDimsForVertices, 0 >>>
                (dVertices_.get(), dEdgesFlags.get(), verticesCount_);
            getLastCudaError("markSegments launch failed.");

            // Now find minimum edges for each vertex.
            thrust::device_ptr<uint> dMinScannedEdges =
                pools.uintEdges.get();
            thrust::device_ptr<float> dMinScannedWeights =
                pools.floatEdges.get();
            thrust::inclusive_scan_by_key(
                dEdgesFlags,
                dEdgesFlags + edgesCount_,
                thrust::make_zip_iterator(
                    thrust::make_tuple(dWeights_, dEdges_)),
                thrust::make_zip_iterator(
                    thrust::make_tuple(dMinScannedWeights, dMinScannedEdges)),
                thrust::greater_equal<uint>(),
                thrust::minimum< thrust::tuple<float, uint> >());

            // To make things clear.
            // Let "dEdgesFlags" denote groups of edges that
            // correspond to the same vertices. Then the last edge of each group
            // (in "dMinScannedEdges" and "dMinScannedWeights") is now minimal.

            // Calculate a successor vertex for each vertex. A successor of the
            // vertex v is a neighbouring vertex connected to v
            // by the minimal edge.
            thrust::device_ptr<uint> dSuccessors = pools.uintVertices.get();
            getSuccessors<<< gridDimsForVertices, blockDimsForVertices, 0 >>>
                (dVertices_.get(),
                 dMinScannedEdges.get(),
                 dSuccessors.get(),
                 verticesCount_,
                 edgesCount_);
            getLastCudaError("getSuccessors launch failed.");

            pools.uintEdges.put(dMinScannedEdges);
            pools.floatEdges.put(dMinScannedWeights);

            // Remove cyclic successor dependencies. Note that there can be only
            // two vertices in a cycle. See [1] for details.
            removeCycles<<< gridDimsForVertices, blockDimsForVertices, 0 >>>
                (dSuccessors.get(), verticesCount_);
            getLastCudaError("removeCycles launch failed.");

            // Build up an array of startpoints for edges. As already stated,
            // each group of edges denoted by "dEdgesFlags"
            // has the same startpoint.
            thrust::device_ptr<uint> dStartpoints = pools.uintEdges.get();
            thrust::inclusive_scan(dEdgesFlags,
                                   dEdgesFlags + edgesCount_,
                                   dStartpoints);
            addScalar<<< gridDimsForEdges, blockDimsForEdges, 0 >>>
                (dStartpoints.get(), -1, edgesCount_);
            getLastCudaError("addScalar launch failed.");

            // Shrink the chains of successors. New successors will eventually
            // represent superpixels of the new level.
            thrust::device_ptr<uint> dRepresentatives =
                pools.uintVertices.get();
            getRepresentatives
                <<< gridDimsForVertices, blockDimsForVertices, 0 >>>
                (dSuccessors.get(),
                 dRepresentatives.get(),
                 verticesCount_);
            getLastCudaError("getRepresentatives launch failed.");

            swap(dSuccessors, dRepresentatives);
            pools.uintVertices.put(dRepresentatives);

            // Group vertices by successors' indices.
            thrust::device_ptr<uint> dClusteredVerticesIDs =
                pools.uintVertices.get();
            thrust::sequence(dClusteredVerticesIDs,
                             dClusteredVerticesIDs + verticesCount_);
            thrust::sort(
                thrust::make_zip_iterator(
                    thrust::make_tuple(
                        thrust::device_ptr<uint> (dSuccessors),
                        thrust::device_ptr<uint> (dClusteredVerticesIDs))),
                thrust::make_zip_iterator(
                    thrust::make_tuple(
                        thrust::device_ptr<uint> (dSuccessors +
                                                  verticesCount_),
                        thrust::device_ptr<uint> (dClusteredVerticesIDs +
                                                  verticesCount_))));

            // Mark those groups.
            thrust::device_ptr<uint> dVerticesFlags_ = pools.uintVertices.get();
            thrust::fill(dVerticesFlags_, dVerticesFlags_ + verticesCount_, 0);
            thrust::adjacent_difference(dSuccessors,
                                        dSuccessors + verticesCount_,
                                        dVerticesFlags_,
                                        thrust::not_equal_to<uint>());
            cudaMemset((void *) dVerticesFlags_.get(), 0, sizeof(uint));

            // Assign new indices to the successors (the indices of vertices
            // at the new level).
            thrust::device_ptr<uint> dNewVerticesIDs_ =
                pools.uintVertices.get();
            thrust::inclusive_scan(dVerticesFlags_,
                                   dVerticesFlags_ + verticesCount_,
                                   dNewVerticesIDs_);

            pools.uintVertices.put(dVerticesFlags_);

            // Now we can calculate number of resulting superpixels easily.
            uint newVerticesCount;
            cudaMemcpy(&newVerticesCount,
                       (dNewVerticesIDs_ + verticesCount_ - 1).get(),
                       sizeof(uint),
                       cudaMemcpyDeviceToHost);
            ++newVerticesCount;

            // There are two special cases when we can stop our algorithm:
            // 1) number of vertices in the graph remained unchanged;
            // 2) only one vertex remains.
            if (newVerticesCount == verticesCount_)
            {
                return ALGORITHM_FINISHED;
            }
            else if (newVerticesCount == 1)
            {
                thrust::device_ptr<uint> dDummyVerticesOffsets =
                    pools.uintVertices.get();
                cudaMemset((void *) dDummyVerticesOffsets.get(),
                           0,
                           sizeof(uint));
                thrust::device_ptr<uint> dDummyVerticesIDs =
                    pools.uintVertices.get();
                thrust::sequence(dDummyVerticesIDs,
                                 dDummyVerticesIDs + verticesCount_);
                segmentations.addLevel(1,
                                       verticesCount_,
                                       dDummyVerticesOffsets,
                                       dDummyVerticesIDs);
                return ALGORITHM_FINISHED;
            }

            // Calculate how old vertices IDs map to new vertices IDs.
            thrust::device_ptr<uint> dVerticesMapping =
                pools.uintVertices.get();
            getVerticesMapping
                <<< gridDimsForVertices, blockDimsForVertices, 0 >>>
                (dClusteredVerticesIDs.get(),
                 dNewVerticesIDs_.get(),
                 dVerticesMapping.get(),
                 verticesCount_);
            getLastCudaError("getVerticesMapping launch failed.");

            pools.uintVertices.put(dNewVerticesIDs_);
            pools.uintVertices.put(dClusteredVerticesIDs);
            pools.uintVertices.put(dSuccessors);

            // Invalidate self-loops in the reduced graph (the graph
            // produced by merging all old vertices that have
            // the same successor).
            invalidateLoops<<< gridDimsForEdges, blockDimsForEdges, 0 >>>
                (dStartpoints.get(),
                 dVerticesMapping.get(),
                 dEdges_.get(),
                 edgesCount_);
            getLastCudaError("invalidateLoops launch failed.");

            // Calculate various information about the surviving
            // (new startpoints IDs and IDs of edges) and
            // non-surviving/contracted edges (their weights).
            thrust::device_ptr<uint> dNewStartpoints = pools.uintEdges.get();
            thrust::device_ptr<uint> dSurvivedEdgesIDs = pools.uintEdges.get();
            calculateEdgesInfo<<< gridDimsForEdges, blockDimsForEdges, 0 >>>
                (dStartpoints.get(),
                 dVerticesMapping.get(),
                 dEdges_.get(),
                 dWeights_.get(),
                 dNewStartpoints.get(),
                 dSurvivedEdgesIDs.get(),
                 edgesCount_,
                 newVerticesCount);
            getLastCudaError("calculateEdgesInfo launch failed.");

            pools.uintEdges.put(dStartpoints);

            // Group that information by the new startpoints IDs.
            // Keep in mind that we want to build new (reduced) graph and apply
            // the step of the algorithm to that one. Hence we need to
            // preserve the structure of the original graph: neighbours and
            // weights should be grouped by vertex.
            thrust::sort(
                thrust::make_zip_iterator(
                    thrust::make_tuple(dNewStartpoints,
                                       dSurvivedEdgesIDs)),
                thrust::make_zip_iterator(
                    thrust::make_tuple(dNewStartpoints + edgesCount_,
                                       dSurvivedEdgesIDs + edgesCount_)));

            // Find the group of contracted edges.
            uint *invalidEdgesPtr =
                thrust::find_if(
                    dNewStartpoints,
                    dNewStartpoints + edgesCount_,
                    IsGreaterEqualThan<uint>(newVerticesCount)).get();

            // Calculate how many edges there are in the reduced graph.
            uint validEdgesCount =
                static_cast<uint>(invalidEdgesPtr - dNewStartpoints.get());

            // Mark groups of edges corresponding to the same vertex in the
            // reduced graph.
            thrust::adjacent_difference(dNewStartpoints,
                                        dNewStartpoints + edgesCount_,
                                        dEdgesFlags,
                                        thrust::not_equal_to<uint>());
            cudaMemset((void *) dEdgesFlags.get(), 0, sizeof(uint));
            cudaMemset((void *) dEdgesFlags.get(), 1, 1);

            pools.uintEdges.put(dNewStartpoints);

            // Now we are able to build the reduced graph. See "Graph"
            // class for the details on the graph's internal structure.

            // Calculate vertices' offsets for the reduced graph.
            uint *verticesEndPtr =
                thrust::copy_if(thrust::make_counting_iterator(0U),
                                thrust::make_counting_iterator(validEdgesCount),
                                dEdgesFlags,
                                dVertices_,
                                thrust::identity<uint>()).get();

            pools.uintEdges.put(dEdgesFlags);

            // Build up a neighbourhood for each vertex in the reduced graph
            // (this includes recalculating edges' weights).
            calculateThreadsDistribution(validEdgesCount,
                                         blocksCount,
                                         threadsPerBlockCount);
            dim3 newGridDimsForEdges(blocksCount, 1, 1);
            dim3 newBlockDimsForEdges(threadsPerBlockCount, 1, 1);

            thrust::device_ptr<uint> dNewEdges = pools.uintEdges.get();
            thrust::device_ptr<float> dNewWeights = pools.floatEdges.get();
            makeNewEdges<<< newGridDimsForEdges,
                            newBlockDimsForEdges,
                            0 >>>
                (dSurvivedEdgesIDs.get(),
                 dVerticesMapping.get(),
                 dEdges_.get(),
                 dWeights_.get(),
                 dNewEdges.get(),
                 dNewWeights.get(),
                 validEdgesCount);
            getLastCudaError("makeNewEdges launch failed.");

            swap(dEdges_, dNewEdges);
            swap(dWeights_, dNewWeights);

            pools.uintEdges.put(dNewEdges);
            pools.floatEdges.put(dNewWeights);
            pools.uintEdges.put(dSurvivedEdgesIDs);

            // The graph's reconstruction is now finished.

            // Build new level of the segmentation tree. It is a trivial task
            // as we already have "dVerticesMapping" that contains all
            // sufficient information about the vertices' transformations.
            thrust::device_ptr<uint> dVerticesIDs =
                pools.uintVertices.get();
            thrust::device_ptr<uint> dNewVerticesOffsets =
                pools.uintVertices.get();

            thrust::sequence(dVerticesIDs, dVerticesIDs + verticesCount_);
            thrust::sort_by_key(dVerticesMapping,
                                dVerticesMapping + verticesCount_,
                                dVerticesIDs);
            thrust::unique_by_key_copy(dVerticesMapping,
                                       dVerticesMapping + verticesCount_,
                                       thrust::make_counting_iterator(0),
                                       thrust::make_discard_iterator(),
                                       dNewVerticesOffsets);

            segmentations.addLevel(newVerticesCount,
                                   verticesCount_,
                                   dNewVerticesOffsets,
                                   dVerticesIDs);

            pools.uintVertices.put(dVerticesIDs);
            pools.uintVertices.put(dNewVerticesOffsets);
            pools.uintVertices.put(dVerticesMapping);

            // We can now safely set new counts for vertices and edges.
            verticesCount_ = newVerticesCount;
            edgesCount_ = validEdgesCount;

            return ALGORITHM_NOT_FINISHED;
        }

        uint verticesCount_;
        uint edgesCount_;

        thrust::device_ptr<uint> dVertices_;
        thrust::device_ptr<uint> dEdges_;
        thrust::device_ptr<float> dWeights_;
        thrust::device_ptr<uint> dOutputEdgesFlags_;
};
// Locates "filename" relative to the executable and loads it as a PPM
// image into "data" (one uchar3 per pixel). Returns 0 on success, -1 on
// failure (file not found or not loadable).
int loadImage(const char *filename,
              const char *executablePath,
              vector<uchar3> &data,
              uint &width,
              uint &height)
{
    const char *resolvedPath = sdkFindFilePath(filename, executablePath);
    if (resolvedPath == NULL)
    {
        return -1;
    }

    uchar *rawPixels = NULL;
    unsigned int channels;
    bool loaded = __loadPPM(resolvedPath, &rawPixels, &width, &height, &channels);
    if (!loaded)
    {
        return -1;
    }

    // Reinterpret the raw interleaved RGB bytes as packed uchar3 pixels.
    uchar3 *pixels = reinterpret_cast<uchar3 *>(rawPixels);
    data.assign(pixels, pixels + width * height);
    free(reinterpret_cast<void *>(rawPixels));
    return 0;
}
// Euclidean distance between two RGB pixels (channel-wise differences).
inline float distance(const uchar3 &first, const uchar3 &second)
{
    const int dr = static_cast<int>(first.x) - static_cast<int>(second.x);
    const int dg = static_cast<int>(first.y) - static_cast<int>(second.y);
    const int db = static_cast<int>(first.z) - static_cast<int>(second.z);
    const int squaredNorm = dr * dr + dg * dg + db * db;
    return sqrtf(static_cast<float>(squaredNorm));
}
// Builds a net-graph for the image with 4-connected pixels.
// Each pixel becomes a vertex; edges connect vertically and horizontally
// adjacent pixels and are weighted by the RGB distance between them.
void buildGraph(const vector<uchar3> &image,
                uint width,
                uint height,
                Graph &graph)
{
    uint totalNodes = static_cast<uint>(image.size());
    // A 4-connected grid produces (4 * nodes - 2 * (width + height))
    // directed edges: 4 per pixel minus the missing border neighbours.
    size_t expectedEdgesCount =
        4 * static_cast<size_t>(totalNodes) - 2 * (width + height);

    graph.vertices.resize(totalNodes);
    graph.edges.reserve(expectedEdgesCount);
    // Bug fix: previously this reserved graph.edges.size(), which is 0 at
    // this point (reserve() does not change size()), so "weights" grew by
    // repeated reallocation. Reserve the real expected edge count instead.
    graph.weights.reserve(expectedEdgesCount);

    uint edgesProcessed = 0;
    for (uint y = 0; y < height; ++y)
    {
        for (uint x = 0; x < width; ++x)
        {
            uint nodeIndex = y * width + x;
            const uchar3 &centerPixel = image[nodeIndex];
            // Record where this vertex's adjacency list starts.
            graph.vertices[nodeIndex] = edgesProcessed;

            if (y > 0)
            {
                uint lowerNodeIndex = (y - 1) * width + x;
                const uchar3 &lowerPixel = image[lowerNodeIndex];
                graph.edges.push_back(lowerNodeIndex);
                graph.weights.push_back(distance(centerPixel, lowerPixel));
                ++edgesProcessed;
            }
            if (y + 1 < height)
            {
                uint upperNodeIndex = (y + 1) * width + x;
                const uchar3 &upperPixel = image[upperNodeIndex];
                graph.edges.push_back(upperNodeIndex);
                graph.weights.push_back(distance(centerPixel, upperPixel));
                ++edgesProcessed;
            }
            if (x > 0)
            {
                uint leftNodeIndex = y * width + x - 1;
                const uchar3 &leftPixel = image[leftNodeIndex];
                graph.edges.push_back(leftNodeIndex);
                graph.weights.push_back(distance(centerPixel, leftPixel));
                ++edgesProcessed;
            }
            if (x + 1 < width)
            {
                uint rightNodeIndex = y * width + x + 1;
                const uchar3 &rightPixel = image[rightNodeIndex];
                graph.edges.push_back(rightNodeIndex);
                graph.weights.push_back(distance(centerPixel, rightPixel));
                ++edgesProcessed;
            }
        }
    }
}
// Default input image name (overridable with the "-file=<name>" flag).
// NOTE(review): should ideally be "static const char *"; left unchanged
// here to keep this a documentation-only pass.
static char *kDefaultImageName = "test.ppm";

int main(int argc, char **argv)
{
    vector<uchar3> image;
    uint imageWidth, imageHeight;
    char *imageName;

    printf("%s Starting...\n\n", argv[0]);

    // Pick the input image: "-file=<name>" from the command line,
    // falling back to the default.
    imageName = kDefaultImageName;
    if (checkCmdLineFlag(argc, (const char **) argv, "file"))
    {
        getCmdLineArgumentString(argc,
                                 (const char **) argv,
                                 "file",
                                 &imageName);
    }
    if (loadImage(imageName, argv[0], image, imageWidth, imageHeight) != 0)
    {
        printf("Failed to open <%s>, program exit...\n", imageName);
        exit(EXIT_FAILURE);
    }

    int devID = findCudaDevice(argc, (const char **)argv);
    cudaDeviceProp deviceProp;
    checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));

    // This requires a minimum of SM 1.3 to be able to run
    if ((deviceProp.major > 1) ||
        (deviceProp.major == 1 && deviceProp.minor >= 3))
    {
        // Build the pixel graph, run the tree builder, then dump one
        // PPM image per segmentation level.
        Graph graph;
        buildGraph(image, imageWidth, imageHeight, graph);
        Pyramid segmentations;

        cout << "* Building segmentation tree... ";
        cout.flush();

        SegmentationTreeBuilder algo;
        float elapsedTime = algo.run(graph, segmentations);

        cout << "done in " << elapsedTime << " (ms)" << endl;
        cout << "* Dumping levels for each tree..." << endl << endl;

        segmentations.dump(imageWidth, imageHeight);

        // Validate two dumped levels against reference images; exit code
        // reflects whether both comparisons passed.
        bool bResults[2];
        bResults[0] = sdkComparePPM("level_00.ppm",
                                    sdkFindFilePath("ref_00.ppm", argv[0]),
                                    5.0f,
                                    0.15f,
                                    false);
        bResults[1] = sdkComparePPM("level_09.ppm",
                                    sdkFindFilePath("ref_09.ppm", argv[0]),
                                    5.0f,
                                    0.15f,
                                    false);

        cudaDeviceReset();
        exit((bResults[0] && bResults[1]) ? EXIT_SUCCESS : EXIT_FAILURE);
    }
    else
    {
        printf("segmentationTreeThrust requires a GPU with Compute Capability "
               "1.3 or higher, exiting...\n\n");
        cudaDeviceReset();
        exit(EXIT_SUCCESS);
    }
}
|
3067f617c0909e85e533020d1788bb0dff508779.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
CUDA implementation of the NFFT.
-----------
Accelerating the Non-equispaced Fast Fourier Transform on Commodity Graphics Hardware.
T.S. Sørensen, T. Schaeffter, K.Ø. Noe, M.S. Hansen.
IEEE Transactions on Medical Imaging 2008; 27(4):538-547.
Real-time Reconstruction of Sensitivity Encoded Radial Magnetic Resonance Imaging Using a Graphics Processing Unit.
T.S. Sørensen, D. Atkinson, T. Schaeffter, M.S. Hansen.
IEEE Transactions on Medical Imaging 2009; 28(12): 1974-1985.
*/
#include "cuNFFT.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_utils.h"
#include "cuNDFFT.h"
#include "NFFT.hpp"
using namespace Gadgetron;
// Constructs an NFFT plan for the given matrix sizes and kernel width W,
// binding it to "device" (negative selects the current device).
template<class REAL, unsigned int D, ConvolutionType CONV>
Gadgetron::cuNFFT_impl<REAL, D, CONV>::cuNFFT_impl(
    const vector_td<size_t, D>& matrix_size,
    const vector_td<size_t, D>& matrix_size_os,
    REAL W,
    int device)
    : NFFT_plan<cuNDArray, REAL, D>(matrix_size, matrix_size_os, W)
{
    // Minimal initialization: resolve and validate the target device.
    this->initialize(device);
}
// Centered FFT over the first D dimensions of "data":
// forwards -> fftc, otherwise -> ifftc.
template<class REAL, unsigned int D, ConvolutionType CONV>
void
Gadgetron::cuNFFT_impl<REAL, D, CONV>::fft(cuNDArray <complext<REAL>>& data, NFFT_fft_mode mode, bool do_scale) {
    const bool forwards = (mode == NFFT_fft_mode::FORWARDS);
    if (forwards) {
        fft_plan.fftc(data, D, do_scale);
        return;
    }
    fft_plan.ifftc(data, D, do_scale);
}
// Multiplies the oversampled image by the deapodization filter,
// computing and caching the filter (per domain) on first use.
// Throws if "image" does not have the oversampled matrix size.
template<class REAL, unsigned int D, ConvolutionType CONV>
void
Gadgetron::cuNFFT_impl<REAL, D, CONV>::deapodize(cuNDArray <complext<REAL>>& image, bool fourier_domain) {
    typename uint64d<D>::Type image_dims = from_std_vector<size_t, D>(*image.get_dimensions());
    bool oversampled_image = (image_dims == this->matrix_size_os_);
    if (!oversampled_image) {
        throw std::runtime_error("Error: cuNFFT_impl::deapodize: ERROR: oversampled image not provided as input.");
    }
    if (fourier_domain) {
        // Fourier-domain correction uses the FFT'ed filter.
        if (!deapodization_filterFFT)
            deapodization_filterFFT = compute_deapodization_filter(true);
        image *= *deapodization_filterFFT;
    } else {
        // Image-domain correction uses the plain filter.
        if (!deapodization_filter)
            deapodization_filter = compute_deapodization_filter(false);
        image *= *deapodization_filter;
    }
}
// Resolves which device this plan is bound to. A negative "device"
// selects the currently active device. The chosen device is briefly
// activated and the previous device restored, so the call has no
// lasting side effect on the active device.
template<class REAL, unsigned int D, ConvolutionType CONV>
void Gadgetron::cuNFFT_impl<REAL, D, CONV>::initialize(int device)
{
    // Device checks.
    if (hipGetDevice(&this->device_) != hipSuccess)
    {
        throw cuda_error("Error: cuNFFT_impl::barebones:: unable to get this->device_ no");
    }
    if (device < 0)
    {
        // Keep the current device (re-query to populate device_).
        if (hipGetDevice(&this->device_) != hipSuccess)
        {
            throw cuda_error("Error: cuNFFT_impl::setup: unable to determine "
                             "device properties.");
        }
    }
    else
    {
        this->device_ = device;
    }
    // Verify the chosen device can be activated, then restore the old one.
    int device_no_old;
    if (hipGetDevice(&device_no_old) != hipSuccess)
        throw cuda_error("Error: cuNFFT_impl::setup: unable to get device number");
    if (this->device_ != device_no_old &&
        hipSetDevice(this->device_) != hipSuccess)
        throw cuda_error("Error: cuNFFT_impl::setup: unable to set device");
    if (this->device_ != device_no_old &&
        hipSetDevice(device_no_old) != hipSuccess)
        throw cuda_error("Error: cuNFFT_impl::setup: unable to restore device");
}
//
// Grid fictitious trajectory with a single sample at the origin
//
// One thread per cell of the oversampled grid: each thread evaluates the
// convolution kernel at its cell's offset from the grid centre and writes
// the resulting real-valued weight into "image_os".
template<class REAL, unsigned int D, template<class, unsigned int> class K>
__global__ void
compute_deapodization_filter_kernel(typename uintd<D>::Type matrix_size_os,
                                    typename reald<REAL, D>::Type matrix_size_os_real,
                                    complext <REAL> *__restrict__ image_os,
                                    const ConvolutionKernel<REAL, D, K>* kernel)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int num_elements = prod(matrix_size_os);
    if (idx < num_elements) {
        // Compute weight from Kaiser-Bessel filter
        const typename uintd<D>::Type cell_pos = idx_to_co(idx, matrix_size_os);
        // Sample position ("origin"): the centre of the oversampled grid.
        const vector_td<REAL, D> sample_pos = REAL(0.5) * matrix_size_os_real;
        // Calculate the distance between the cell and the sample
        vector_td<REAL, D>
        cell_pos_real = vector_td<REAL, D>(cell_pos);
        const typename reald<REAL, D>::Type delta = abs(sample_pos - cell_pos_real);
        // Compute convolution weight.
        REAL weight = kernel->get(delta);
        // Output weight (purely real).
        image_os[idx] = complext<REAL>(weight, 0.0f);
    }
}
//
// Function to calculate the deapodization filter
//
// Evaluates the gridding kernel on the oversampled grid, optionally FFTs
// the result, and returns its elementwise reciprocal (the correction
// image "deapodize" multiplies by).
template<class REAL, unsigned int D, ConvolutionType CONV>
boost::shared_ptr<cuNDArray < complext < REAL> > >
Gadgetron::cuNFFT_impl<REAL, D, CONV>::compute_deapodization_filter(bool FFTed) {
    std::vector<size_t> tmp_vec_os = to_std_vector(this->matrix_size_os_);
    auto filter = boost::make_shared<cuNDArray<complext<REAL>>>(tmp_vec_os);
    vector_td<REAL, D>
    matrix_size_os_real = vector_td<REAL, D>(this->matrix_size_os_);
    // Find dimensions of grid/blocks: one thread per grid cell.
    dim3 dimBlock(256);
    dim3 dimGrid((prod(this->matrix_size_os_) + dimBlock.x - 1) / dimBlock.x);
    // Invoke kernel
    hipLaunchKernelGGL(( compute_deapodization_filter_kernel<REAL, D>), dim3(dimGrid), dim3(dimBlock), 0, 0,
        vector_td<unsigned int, D>(
                this->matrix_size_os_), matrix_size_os_real,
                filter->get_data_ptr(),
                dynamic_cast<const cuGriddingConvolution<complext<REAL>, D, KaiserKernel>*>(
                    this->conv_.get())->get_kernel_d());
    CHECK_FOR_CUDA_ERROR();
    // FFT the filter when the Fourier-domain variant is requested.
    if (FFTed) {
        fft(*filter, NFFT_fft_mode::FORWARDS, false);
    } else {
        fft(*filter, NFFT_fft_mode::BACKWARDS, false);
    }
    // Reciprocal: the filter is applied by multiplication.
    reciprocal_inplace(filter.get());
    return filter;
}
namespace Gadgetron {
    // Factory: creates a cuNFFT plan specialized for the requested
    // convolution type (standard / atomic / sparse-matrix).
    template<class REAL, unsigned int D>
    boost::shared_ptr<cuNFFT_plan<REAL, D>> NFFT<cuNDArray, REAL, D>::make_plan(const vector_td<size_t,D>& matrix_size, const vector_td<size_t,D>& matrix_size_os, REAL W, ConvolutionType conv) {
        switch (conv) {
            case ConvolutionType::STANDARD:
                return boost::make_shared<cuNFFT_impl<REAL, D, ConvolutionType::STANDARD> >(matrix_size,matrix_size_os,W);
            case ConvolutionType::ATOMIC:
                return boost::make_shared<cuNFFT_impl<REAL, D, ConvolutionType::ATOMIC> >(matrix_size,matrix_size_os,W);
            case ConvolutionType::SPARSE_MATRIX:
                return boost::make_shared<cuNFFT_impl<REAL, D, ConvolutionType::SPARSE_MATRIX>>(matrix_size,matrix_size_os,W);
        }
        // Unreachable for valid enum values.
        throw std::runtime_error(
                "Invalid convolution type provided. If you're reading this, you may have broken your computer quite badly");
    }

    // Double-precision overload: only the standard convolution type is
    // implemented for doubles.
    template<unsigned int D>
    boost::shared_ptr<cuNFFT_plan<double, D>> NFFT<cuNDArray, double, D>::make_plan(const vector_td<size_t,D>& matrix_size, const vector_td<size_t,D>& matrix_size_os, double W, ConvolutionType conv) {
        if (conv == ConvolutionType::STANDARD) {
            return boost::make_shared<cuNFFT_impl<double, D, ConvolutionType::STANDARD>>(matrix_size,matrix_size_os,W);
        }
        throw std::runtime_error("Only standard convolution type supported for doubles");
    }
}
//
// Template instantiation
//
// Explicit instantiations for the supported precisions (float/double),
// dimensionalities (1-4) and convolution types. Atomic and sparse-matrix
// convolutions are instantiated for float only.
template
class Gadgetron::cuNFFT_impl<float, 1, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 1, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 1>;
template
class Gadgetron::cuNFFT_impl<double, 1>;
template
class Gadgetron::cuNFFT_impl<float, 2, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 2, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 2>;
template
class Gadgetron::cuNFFT_impl<double, 2>;
template
class Gadgetron::cuNFFT_impl<float, 3, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 3, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 3>;
template
class Gadgetron::cuNFFT_impl<double, 3>;
template
class Gadgetron::cuNFFT_impl<float, 4, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 4, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 4>;
template
class Gadgetron::cuNFFT_impl<double, 4>;

template class Gadgetron::NFFT<cuNDArray,float,1>;
template class Gadgetron::NFFT<cuNDArray,float,2>;
template class Gadgetron::NFFT<cuNDArray,float,3>;
template class Gadgetron::NFFT<cuNDArray,float,4>;

template class Gadgetron::NFFT<cuNDArray,double,1>;
template class Gadgetron::NFFT<cuNDArray,double,2>;
template class Gadgetron::NFFT<cuNDArray,double,3>;
template class Gadgetron::NFFT<cuNDArray,double,4>;
| 3067f617c0909e85e533020d1788bb0dff508779.cu | /*
CUDA implementation of the NFFT.
-----------
Accelerating the Non-equispaced Fast Fourier Transform on Commodity Graphics Hardware.
T.S. Sørensen, T. Schaeffter, K.Ø. Noe, M.S. Hansen.
IEEE Transactions on Medical Imaging 2008; 27(4):538-547.
Real-time Reconstruction of Sensitivity Encoded Radial Magnetic Resonance Imaging Using a Graphics Processing Unit.
T.S. Sørensen, D. Atkinson, T. Schaeffter, M.S. Hansen.
IEEE Transactions on Medical Imaging 2009; 28(12): 1974-1985.
*/
#include "cuNFFT.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_utils.h"
#include "cuNDFFT.h"
#include "NFFT.hpp"
using namespace Gadgetron;
// Constructs an NFFT plan for the given matrix sizes and kernel width W,
// binding it to "device" (negative selects the current device).
template<class REAL, unsigned int D, ConvolutionType CONV>
Gadgetron::cuNFFT_impl<REAL, D, CONV>::cuNFFT_impl(
    const vector_td<size_t, D>& matrix_size,
    const vector_td<size_t, D>& matrix_size_os,
    REAL W,
    int device)
    : NFFT_plan<cuNDArray, REAL, D>(matrix_size, matrix_size_os, W)
{
    // Minimal initialization: resolve and validate the target device.
    this->initialize(device);
}
// Centered FFT over the first D dimensions of "data": forward mode maps
// to fftc, everything else to ifftc.
template<class REAL, unsigned int D, ConvolutionType CONV>
void
Gadgetron::cuNFFT_impl<REAL, D, CONV>::fft(cuNDArray <complext<REAL>>& data, NFFT_fft_mode mode, bool do_scale) {
    if (mode != NFFT_fft_mode::FORWARDS) {
        fft_plan.ifftc(data, D, do_scale);
        return;
    }
    fft_plan.fftc(data, D, do_scale);
}
// Multiplies the oversampled image by the deapodization filter, lazily
// computing and caching the filter for the requested domain.
// Throws if "image" does not have the oversampled matrix size.
template<class REAL, unsigned int D, ConvolutionType CONV>
void
Gadgetron::cuNFFT_impl<REAL, D, CONV>::deapodize(cuNDArray <complext<REAL>>& image, bool fourier_domain) {
    typename uint64d<D>::Type image_dims = from_std_vector<size_t, D>(*image.get_dimensions());
    bool oversampled_image = (image_dims == this->matrix_size_os_);
    if (!oversampled_image) {
        throw std::runtime_error("Error: cuNFFT_impl::deapodize: ERROR: oversampled image not provided as input.");
    }
    if (fourier_domain) {
        // Fourier-domain correction uses the FFT'ed filter.
        if (!deapodization_filterFFT)
            deapodization_filterFFT = compute_deapodization_filter(true);
        image *= *deapodization_filterFFT;
    } else {
        // Image-domain correction uses the plain filter.
        if (!deapodization_filter)
            deapodization_filter = compute_deapodization_filter(false);
        image *= *deapodization_filter;
    }
}
// Resolve and validate the CUDA device this plan is bound to.
// device < 0 means "use the currently active device"; otherwise the given
// index is stored. The set/restore sequence at the end verifies the
// requested device is usable without permanently changing the caller's
// active device.
template<class REAL, unsigned int D, ConvolutionType CONV>
void Gadgetron::cuNFFT_impl<REAL, D, CONV>::initialize(int device)
{
// Device checks.
if (cudaGetDevice(&this->device_) != cudaSuccess)
{
throw cuda_error("Error: cuNFFT_impl::barebones:: unable to get this->device_ no");
}
if (device < 0)
{
// Negative index: fall back to whatever device is currently active.
if (cudaGetDevice(&this->device_) != cudaSuccess)
{
throw cuda_error("Error: cuNFFT_impl::setup: unable to determine "
"device properties.");
}
}
else
{
this->device_ = device;
}
int device_no_old;
if (cudaGetDevice(&device_no_old) != cudaSuccess)
throw cuda_error("Error: cuNFFT_impl::setup: unable to get device number");
// Switch to the target device (this validates the index) ...
if (this->device_ != device_no_old &&
cudaSetDevice(this->device_) != cudaSuccess)
throw cuda_error("Error: cuNFFT_impl::setup: unable to set device");
// ... and restore the caller's previously active device.
if (this->device_ != device_no_old &&
cudaSetDevice(device_no_old) != cudaSuccess)
throw cuda_error("Error: cuNFFT_impl::setup: unable to restore device");
}
//
// Grid fictitious trajectory with a single sample at the origin
//
// One thread per cell of the oversampled grid. Each thread evaluates the
// convolution kernel at the distance between its own cell and a single
// sample placed at the grid centre, and writes the resulting weight into
// image_os. The host later FFTs/inverts this field to obtain the
// deapodization filter.
template<class REAL, unsigned int D, template<class, unsigned int> class K>
__global__ void
compute_deapodization_filter_kernel(typename uintd<D>::Type matrix_size_os,
typename reald<REAL, D>::Type matrix_size_os_real,
complext <REAL> *__restrict__ image_os,
const ConvolutionKernel<REAL, D, K>* kernel)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int num_elements = prod(matrix_size_os);
// Bounds guard: the grid may be launched with more threads than cells.
if (idx < num_elements) {
// Compute weight from Kaiser-Bessel filter
const typename uintd<D>::Type cell_pos = idx_to_co(idx, matrix_size_os);
// Sample position ("origin")
const vector_td<REAL, D> sample_pos = REAL(0.5) * matrix_size_os_real;
// Calculate the distance between the cell and the sample
vector_td<REAL, D>
cell_pos_real = vector_td<REAL, D>(cell_pos);
const typename reald<REAL, D>::Type delta = abs(sample_pos - cell_pos_real);
// Compute convolution weight.
REAL weight = kernel->get(delta);
// Output weight (stored as a purely real complex value).
image_os[idx] = complext<REAL>(weight, 0.0f);
}
}
//
// Function to calculate the deapodization filter
//
// Grids the convolution kernel for a single centred sample, transforms it
// (forwards or backwards depending on FFTed), and returns the element-wise
// reciprocal so callers can deapodize with a multiplication.
template<class REAL, unsigned int D, ConvolutionType CONV>
boost::shared_ptr<cuNDArray < complext < REAL> > >
Gadgetron::cuNFFT_impl<REAL, D, CONV>::compute_deapodization_filter(bool FFTed) {
std::vector<size_t> tmp_vec_os = to_std_vector(this->matrix_size_os_);
auto filter = boost::make_shared<cuNDArray<complext<REAL>>>(tmp_vec_os);
vector_td<REAL, D>
matrix_size_os_real = vector_td<REAL, D>(this->matrix_size_os_);
// Find dimensions of grid/blocks.
dim3 dimBlock(256);
dim3 dimGrid((prod(this->matrix_size_os_) + dimBlock.x - 1) / dimBlock.x);
// Invoke kernel
// NOTE(review): the dynamic_cast assumes the convolution engine was built
// with a KaiserKernel; a different kernel type would yield nullptr and
// crash on get_kernel_d() — confirm this invariant is enforced upstream.
compute_deapodization_filter_kernel<REAL, D><<<dimGrid, dimBlock>>>(
vector_td<unsigned int, D>(
this->matrix_size_os_), matrix_size_os_real,
filter->get_data_ptr(),
dynamic_cast<const cuGriddingConvolution<complext<REAL>, D, KaiserKernel>*>(
this->conv_.get())->get_kernel_d());
CHECK_FOR_CUDA_ERROR();
// FFT (unscaled, matching the filter's later use in deapodize()).
if (FFTed) {
fft(*filter, NFFT_fft_mode::FORWARDS, false);
} else {
fft(*filter, NFFT_fft_mode::BACKWARDS, false);
}
// Reciprocal, so deapodization is a multiply rather than a divide.
reciprocal_inplace(filter.get());
return filter;
}
namespace Gadgetron {
// Factory: build a cuNFFT plan specialized for the requested convolution
// strategy (standard, atomic, or sparse-matrix based gridding).
template<class REAL, unsigned int D>
boost::shared_ptr<cuNFFT_plan<REAL, D>> NFFT<cuNDArray, REAL, D>::make_plan(const vector_td<size_t,D>& matrix_size, const vector_td<size_t,D>& matrix_size_os, REAL W, ConvolutionType conv) {
switch (conv) {
case ConvolutionType::STANDARD:
return boost::make_shared<cuNFFT_impl<REAL, D, ConvolutionType::STANDARD> >(matrix_size,matrix_size_os,W);
case ConvolutionType::ATOMIC:
return boost::make_shared<cuNFFT_impl<REAL, D, ConvolutionType::ATOMIC> >(matrix_size,matrix_size_os,W);
case ConvolutionType::SPARSE_MATRIX:
return boost::make_shared<cuNFFT_impl<REAL, D, ConvolutionType::SPARSE_MATRIX>>(matrix_size,matrix_size_os,W);
}
// Unreachable for valid enum values; guards against corrupted input.
throw std::runtime_error(
"Invalid convolution type provided. If you're reading this, you may have broken your computer quite badly");
}
// Double-precision specialization: only the standard convolution path is
// available for double inputs.
template<unsigned int D>
boost::shared_ptr<cuNFFT_plan<double, D>> NFFT<cuNDArray, double, D>::make_plan(const vector_td<size_t,D>& matrix_size, const vector_td<size_t,D>& matrix_size_os, double W, ConvolutionType conv) {
if (conv == ConvolutionType::STANDARD) {
return boost::make_shared<cuNFFT_impl<double, D, ConvolutionType::STANDARD>>(matrix_size,matrix_size_os,W);
}
throw std::runtime_error("Only standard convolution type supported for doubles");
}
}
//
// Template instantion
//
template
class Gadgetron::cuNFFT_impl<float, 1, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 1, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 1>;
template
class Gadgetron::cuNFFT_impl<double, 1>;
template
class Gadgetron::cuNFFT_impl<float, 2, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 2, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 2>;
template
class Gadgetron::cuNFFT_impl<double, 2>;
template
class Gadgetron::cuNFFT_impl<float, 3, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 3, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 3>;
template
class Gadgetron::cuNFFT_impl<double, 3>;
template
class Gadgetron::cuNFFT_impl<float, 4, ConvolutionType::ATOMIC>;
template
class Gadgetron::cuNFFT_impl<float, 4, ConvolutionType::SPARSE_MATRIX>;
template
class Gadgetron::cuNFFT_impl<float, 4>;
template
class Gadgetron::cuNFFT_impl<double, 4>;
template class Gadgetron::NFFT<cuNDArray,float,1>;
template class Gadgetron::NFFT<cuNDArray,float,2>;
template class Gadgetron::NFFT<cuNDArray,float,3>;
template class Gadgetron::NFFT<cuNDArray,float,4>;
template class Gadgetron::NFFT<cuNDArray,double,1>;
template class Gadgetron::NFFT<cuNDArray,double,2>;
template class Gadgetron::NFFT<cuNDArray,double,3>;
template class Gadgetron::NFFT<cuNDArray,double,4>;
|
c30da5a0ccb890cc21ae78f8356a59a2dbc906d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipFree(mAnchor[ii]));
}
CUDA_CHECK(hipHostFree(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
//using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
//using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// Decode one YOLO head's raw predictions into Detection records.
// One thread per (batch, grid-cell); each thread loops over the anchors of
// its cell. Detections whose objectness passes IGNORE_THRESH are appended
// to the per-batch output buffer, whose first float is an atomic counter.
// Input layout per batch: [anchor][channel][grid-cell], channel = 5+classes.
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
// Split the flat index into (batch index, cell index within feature map).
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
// NOTE(review): the literal 3 presumably equals CHECK_COUNT — confirm, and
// prefer the named constant if so.
for (int k = 0; k < 3; ++k) {
// Objectness probability for anchor k at this cell.
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
// Arg-max over class logits (channels 5 .. info_len_i-1).
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
// Atomically reserve a slot in this batch's buffer; slot 0 of the buffer
// is the running detection count (stored as a float).
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
// W: (Pw * e^tw) / FeaturemapW * netwidth
// v5: https://github.com/ultralytics/yolov5/issues/471
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
// Host-side launcher: reset each batch's detection counter, then launch one
// CalDetection kernel per YOLO head.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize)
{
// Floats per batch in the output buffer: 1 counter + mMaxOutObject records.
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
// Only the leading counter float needs zeroing; detection slots are
// overwritten as they are claimed by the kernel.
CUDA_CHECK(hipMemset(output + idx * outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
// Shrink the block size for very small feature maps.
// NOTE(review): this permanently mutates the member mThreadCount across
// calls — confirm this is intended.
if (numElem < mThreadCount)
mThreadCount = numElem;
//printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
// NOTE(review): the launch and the memsets above use the default stream;
// the `stream` parameter is unused here — verify against enqueue().
CalDetection << < (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount >> >
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int class_count = 80;
int input_w = 416;
int input_h = 416;
int max_output_object_count = 1000;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| c30da5a0ccb890cc21ae78f8356a59a2dbc906d8.cu | #include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
//using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
//using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// Decode one YOLO head's raw predictions into Detection records.
// One thread per (batch, grid-cell); each thread loops over the anchors of
// its cell. Detections whose objectness passes IGNORE_THRESH are appended
// to the per-batch output buffer, whose first float is an atomic counter.
// Input layout per batch: [anchor][channel][grid-cell], channel = 5+classes.
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
// Split the flat index into (batch index, cell index within feature map).
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
// NOTE(review): the literal 3 presumably equals CHECK_COUNT — confirm, and
// prefer the named constant if so.
for (int k = 0; k < 3; ++k) {
// Objectness probability for anchor k at this cell.
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
// Arg-max over class logits (channels 5 .. info_len_i-1).
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
// Atomically reserve a slot in this batch's buffer; slot 0 of the buffer
// is the running detection count (stored as a float).
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
// W: (Pw * e^tw) / FeaturemapW * netwidth
// v5: https://github.com/ultralytics/yolov5/issues/471
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
// Host-side launcher: reset each batch's detection counter, then launch one
// CalDetection kernel per YOLO head.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize)
{
// Floats per batch in the output buffer: 1 counter + mMaxOutObject records.
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
// Only the leading counter float needs zeroing; detection slots are
// overwritten as they are claimed by the kernel.
CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
// Shrink the block size for very small feature maps.
// NOTE(review): this permanently mutates the member mThreadCount across
// calls — confirm this is intended.
if (numElem < mThreadCount)
mThreadCount = numElem;
//printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
// NOTE(review): the launch and the memsets above use the default stream;
// the `stream` parameter is unused here — verify against enqueue().
CalDetection << < (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount >> >
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int class_count = 80;
int input_w = 416;
int input_h = 416;
int max_output_object_count = 1000;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
4f10ff8128a9e7de74db473ff1057652c84926a7.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/Math.h>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char digamma_name[] = "digamma";
#endif // AT_USE_JITERATOR()
// See note [Jiterator]
void digamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() {
jitted_gpu_kernel</*name=*/digamma_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, digamma_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_digamma(a);
});
});
#endif // AT_USE_JITERATOR()
}
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char trigamma_name[] = "trigamma";
void trigamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "trigamma_cuda", [&]() {
jitted_gpu_kernel</*name=*/trigamma_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, trigamma_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "trigamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_trigamma(a);
});
});
#endif // AT_USE_JITERATOR()
}
// Name tag used by the jiterator to identify the generated polygamma kernel.
CONSTEXPR_EXCEPT_WIN_CUDA char polygamma_name[] = "polygamma";
// Dispatch polygamma(n, x) over the iterator's elements.
// n == 0 and n == 1 route to the specialized digamma/trigamma kernels;
// higher orders use the generic implementation — JIT-compiled via the
// jiterator when available, precompiled gpu_kernel fallback otherwise.
void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
if (n == 0) {
digamma_kernel_cuda(iter);
} else if (n == 1) {
trigamma_kernel_cuda(iter);
} else {
#if AT_USE_JITERATOR()
// TODO : `unary_jitted_gpu_kernel` for cleaner UX.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
iter.common_dtype(), "polygamma_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/polygamma_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(
iter,
polygamma_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(n));
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
iter.common_dtype(), "polygamma_cuda", [&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_polygamma<scalar_t, /*is_cuda=*/true>(a, static_cast<int>(n));
});
});
#endif // AT_USE_JITERATOR()
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char lgamma_name[] = "lgamma_kernel";
void lgamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() {
jitted_gpu_kernel</*name=*/lgamma_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, lgamma_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::lgamma(a);
});
});
#endif
}
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
} // namespace at::native
| 4f10ff8128a9e7de74db473ff1057652c84926a7.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/Math.h>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char digamma_name[] = "digamma";
#endif // AT_USE_JITERATOR()
// See note [Jiterator]
void digamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() {
jitted_gpu_kernel</*name=*/digamma_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, digamma_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_digamma(a);
});
});
#endif // AT_USE_JITERATOR()
}
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char trigamma_name[] = "trigamma";
void trigamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "trigamma_cuda", [&]() {
jitted_gpu_kernel</*name=*/trigamma_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, trigamma_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "trigamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_trigamma(a);
});
});
#endif // AT_USE_JITERATOR()
}
CONSTEXPR_EXCEPT_WIN_CUDA char polygamma_name[] = "polygamma";
void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
if (n == 0) {
digamma_kernel_cuda(iter);
} else if (n == 1) {
trigamma_kernel_cuda(iter);
} else {
#if AT_USE_JITERATOR()
// TODO : `unary_jitted_gpu_kernel` for cleaner UX.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
iter.common_dtype(), "polygamma_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/polygamma_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(
iter,
polygamma_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(n));
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
iter.common_dtype(), "polygamma_cuda", [&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_polygamma<scalar_t, /*is_cuda=*/true>(a, static_cast<int>(n));
});
});
#endif // AT_USE_JITERATOR()
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char lgamma_name[] = "lgamma_kernel";
// Elementwise log-gamma over the iterator's operands.
// Jiterator path JIT-compiles from `lgamma_string`; the fallback calls the
// device-side ::lgamma math routine directly.
void lgamma_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() {
jitted_gpu_kernel</*name=*/lgamma_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, lgamma_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::lgamma(a);
});
});
#endif
}
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
} // namespace at::native
|
54d4e5a717e0e21ec7d03b337d66c05097378b66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
// Grid-stride kernel: y[i] = x[i] + y[i] for every i in [0, n).
// Each thread starts at its global index and advances by the total number
// of threads in the grid, so any launch configuration covers all elements
// exactly once.
__global__
void add(int n, float *x, float *y) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // BUG FIX: a grid-stride loop must stride by the total thread count
  // (gridDim.x * blockDim.x). The previous blockDim.x * blockDim.x made
  // threads from different blocks revisit the same elements, racing on y[i].
  int stride = gridDim.x * blockDim.x;
  for (int i = index; i < n; i += stride) {
    y[i] = x[i] + y[i];
  }
}
int main(void) {
int N = 1<<20;
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
} | 54d4e5a717e0e21ec7d03b337d66c05097378b66.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
// Grid-stride kernel: y[i] = x[i] + y[i] for every i in [0, n).
// Each thread starts at its global index and advances by the total number
// of threads in the grid, so any launch configuration covers all elements
// exactly once.
__global__
void add(int n, float *x, float *y) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // BUG FIX: a grid-stride loop must stride by the total thread count
  // (gridDim.x * blockDim.x). The previous blockDim.x * blockDim.x made
  // threads from different blocks revisit the same elements, racing on y[i].
  int stride = gridDim.x * blockDim.x;
  for (int i = index; i < n; i += stride) {
    y[i] = x[i] + y[i];
  }
}
int main(void) {
int N = 1<<20;
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
d5876601a6a88fbc453b8043201191c4e36236fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "d_count_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_pivots = NULL;
hipMalloc(&d_pivots, XSIZE*YSIZE);
int *r_buckets = NULL;
hipMalloc(&r_buckets, XSIZE*YSIZE);
int pivotsLength = 1;
unsigned int *r_indices = NULL;
hipMalloc(&r_indices, XSIZE*YSIZE);
unsigned int *r_sublist = NULL;
hipMalloc(&r_sublist, XSIZE*YSIZE);
unsigned int *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE);
int itemCount = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
d_count_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_pivots,r_buckets,pivotsLength,r_indices,r_sublist,d_in,itemCount);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
d_count_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_pivots,r_buckets,pivotsLength,r_indices,r_sublist,d_in,itemCount);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
d_count_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_pivots,r_buckets,pivotsLength,r_indices,r_sublist,d_in,itemCount);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d5876601a6a88fbc453b8043201191c4e36236fb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "d_count_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_pivots = NULL;
cudaMalloc(&d_pivots, XSIZE*YSIZE);
int *r_buckets = NULL;
cudaMalloc(&r_buckets, XSIZE*YSIZE);
int pivotsLength = 1;
unsigned int *r_indices = NULL;
cudaMalloc(&r_indices, XSIZE*YSIZE);
unsigned int *r_sublist = NULL;
cudaMalloc(&r_sublist, XSIZE*YSIZE);
unsigned int *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE);
int itemCount = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
d_count_kernel<<<gridBlock,threadBlock>>>(d_pivots,r_buckets,pivotsLength,r_indices,r_sublist,d_in,itemCount);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
d_count_kernel<<<gridBlock,threadBlock>>>(d_pivots,r_buckets,pivotsLength,r_indices,r_sublist,d_in,itemCount);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
d_count_kernel<<<gridBlock,threadBlock>>>(d_pivots,r_buckets,pivotsLength,r_indices,r_sublist,d_in,itemCount);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
35a22943cbd43ccb59fdd1d722f3c1a84d3776b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Raffaele Solca
@author Mark Gates
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_upper(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
// Each block covers an NB-wide block-column; each of the k threads walks
// one (super-)diagonal. Thread k-1 handles the main diagonal.
int k = blockDim.x;
int ibx = blockIdx.x * NB;
// Row of this thread's first element in block-column ibx. Negative for
// the first threads (their diagonal starts above the matrix); the loop's
// ind + j >= 0 check skips those positions.
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
// Walk the diagonal: each step of lda+1 advances one row and one column.
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/******************************************************************************/
/*
GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_lower(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
// Each block covers an NB-wide block-column; each thread walks one
// sub-diagonal. Thread 0 handles the main diagonal, thread t the t-th
// sub-diagonal, so rows start at or below the diagonal (ind >= ibx >= 0)
// and no negative-row check is required.
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
// Walk the diagonal: each step of lda+1 advances one row and one column.
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/***************************************************************************//**
Purpose
-------
ZLASET_BAND initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX_16
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX_16
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
and A(i,i) = BETA, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute ZLASET in.
@ingroup magma_laset_band
*******************************************************************************/
// Host-side entry point: validates arguments, then launches the upper- or
// lower-band kernel with one thread per diagonal (k total) and one block
// per NB columns, on the stream owned by `queue`.
extern "C" void
magmablas_zlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
// Argument validation; on error, report via magma_xerbla and bail out.
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;  // k becomes the block size; 1024 is the per-block thread limit
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( magma_ceildiv( min(m+k-1,n), NB ) );
hipLaunchKernelGGL(( zlaset_band_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( magma_ceildiv( min(m,n), NB ) );
hipLaunchKernelGGL(( zlaset_band_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dA, ldda);
}
}
| 35a22943cbd43ccb59fdd1d722f3c1a84d3776b0.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Raffaele Solca
@author Mark Gates
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_upper(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
// Each block covers an NB-wide block-column; each of the k threads walks
// one (super-)diagonal. Thread k-1 handles the main diagonal.
int k = blockDim.x;
int ibx = blockIdx.x * NB;
// Row of this thread's first element in block-column ibx. Negative for
// the first threads (their diagonal starts above the matrix); the loop's
// ind + j >= 0 check skips those positions.
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
// Walk the diagonal: each step of lda+1 advances one row and one column.
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/******************************************************************************/
/*
GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_lower(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
// Each block covers an NB-wide block-column; each thread walks one
// sub-diagonal. Thread 0 handles the main diagonal, thread t the t-th
// sub-diagonal, so rows start at or below the diagonal (ind >= ibx >= 0)
// and no negative-row check is required.
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
// Walk the diagonal: each step of lda+1 advances one row and one column.
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/***************************************************************************//**
Purpose
-------
ZLASET_BAND initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX_16
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX_16
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
and A(i,i) = BETA, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute ZLASET in.
@ingroup magma_laset_band
*******************************************************************************/
// Host-side entry point: validates arguments, then launches the upper- or
// lower-band kernel with one thread per diagonal (k total) and one block
// per NB columns, on the stream owned by `queue`.
extern "C" void
magmablas_zlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
// Argument validation; on error, report via magma_xerbla and bail out.
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;  // k becomes the block size; 1024 is the per-block thread limit
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( magma_ceildiv( min(m+k-1,n), NB ) );
zlaset_band_upper<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( magma_ceildiv( min(m,n), NB ) );
zlaset_band_lower<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dA, ldda);
}
}
|
2a0245aee50692dc3deafa910ab2b2f360c18806.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
\brief sequence.cu
\author Andrew Kerr
\brief simple test of a CUDA implementation's ability to allocate memory on the device, launch
a kernel, and fetch its results. One kernel requires no syncthreads, another kernel requires
one synchronization
*/
#include <stdio.h>
// Fills A with the even sequence: A[i] = 2*i for every i in [0, N).
extern "C" __global__ void sequence(int *A, int N) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) {
        return;  // guard the grid tail
    }
    A[gid] = 2 * gid;
}
// Exercises shared memory: each thread stages B[i] in shared storage, then
// reads its XOR partner's slot (threadIdx.x ^ 1 pairs adjacent even/odd
// lanes) and writes partner*2 for odd global indices, partner*3 for even.
// NOTE(review): assumes blockDim.x <= 256 and that i indexes valid
// elements of A and B — confirm against the launch configuration.
extern "C" __global__ void testShr(int *A, const int *B) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int b;
__shared__ int storage[256];
storage[threadIdx.x] = B[i];
// Barrier: the partner's slot must be written before it is read below.
__syncthreads();
if (i & 1) {
b = storage[threadIdx.x ^ 1] * 2;
}
else {
b = storage[threadIdx.x ^ 1] * 3;
}
A[i] = b;
}
// Test driver: verifies the `sequence` kernel (A[i] == 2*i) and the
// shared-memory `testShr` kernel against host-computed expectations.
// Prints "Pass/Fail : Pass" on success; returns -1 on allocation/copy
// failure, 0 otherwise.
int main(int argc, char *arg[]) {
const int N = 32;
int *A_host, *A_gpu =0;
int errors = 0;
size_t bytes = sizeof(int)*N;
if (hipMalloc((void **)&A_gpu, bytes) != hipSuccess) {
printf("hipMalloc() - failed to allocate %d bytes on device\n", (int)bytes);
return -1;
}
A_host = (int *)malloc(bytes);
// Poison the host buffer so a kernel that writes nothing is detected.
for (int i = 0; i < N; i++) {
A_host[i] = -1;
}
hipMemcpy(A_gpu, A_host, bytes, hipMemcpyHostToDevice);
dim3 grid((N+31)/32,1);
dim3 block(32, 1);
hipLaunchKernelGGL(( sequence), dim3(grid), dim3(block) , 0, 0, A_gpu, N);
hipMemcpy(A_host, A_gpu, bytes, hipMemcpyDeviceToHost);
// Check the sequence kernel: every element must equal 2*i.
for (int i = 0; i < N; i++) {
if (A_host[i] != 2*i) {
++errors;
}
}
int *B_gpu = 0;
if (hipMalloc((void **)&B_gpu, bytes) != hipSuccess) {
printf("hipMalloc() - failed to allocate %d bytes on device\n", (int)bytes);
hipFree(A_gpu);
free(A_host);
return -1;
}
// Regenerate the sequence, then run the shared-memory swap/scale kernel.
hipLaunchKernelGGL(( sequence), dim3(grid), dim3(block) , 0, 0, A_gpu, N);
hipLaunchKernelGGL(( testShr), dim3(grid), dim3(block) , 0, 0, B_gpu, A_gpu);
if (hipMemcpy(A_host, B_gpu, bytes, hipMemcpyDeviceToHost) != hipSuccess) {
printf("hipMemcpy(A, B) - failed to copy %d bytes from device to host\n", (int)bytes);
hipFree(A_gpu);
hipFree(B_gpu);
free(A_host);
// BUG FIX: previously fell through after freeing, so the loop below read
// freed memory and the trailing hipFree/free calls double-freed all three
// buffers. Bail out once cleanup is done.
return -1;
}
// Host model of testShr: partner value is 2*(i^1); odd lanes scale by 2,
// even lanes by 3. Stop reporting after 5 mismatches.
for (int i = 0; (errors < 5) && i < N; ++i) {
int b;
if (i & 1) {
b = (i ^ 1) * 2 * 2;
}
else {
b = (i ^ 1) * 2 * 3;
}
int got = A_host[i];
if (b != got) {
printf("ERROR 1 [%d] - expected: %d, got: %d\n", i, b, got);
++errors;
}
}
hipFree(B_gpu);
hipFree(A_gpu);
free(A_host);
if (errors) {
printf("Pass/Fail : Fail\n");
}
else {
printf("Pass/Fail : Pass\n");
}
return 0;
}
| 2a0245aee50692dc3deafa910ab2b2f360c18806.cu | /*!
\brief sequence.cu
\author Andrew Kerr
\brief simple test of a CUDA implementation's ability to allocate memory on the device, launch
a kernel, and fetch its results. One kernel requires no syncthreads, another kernel requires
one synchronization
*/
#include <stdio.h>
// Fills A with the even sequence: A[i] = 2*i for every i in [0, N).
extern "C" __global__ void sequence(int *A, int N) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) {
        return;  // guard the grid tail
    }
    A[gid] = 2 * gid;
}
// Exercises shared memory: each thread stages B[i] in shared storage, then
// reads its XOR partner's slot (threadIdx.x ^ 1 pairs adjacent even/odd
// lanes) and writes partner*2 for odd global indices, partner*3 for even.
// NOTE(review): assumes blockDim.x <= 256 and that i indexes valid
// elements of A and B — confirm against the launch configuration.
extern "C" __global__ void testShr(int *A, const int *B) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int b;
__shared__ int storage[256];
storage[threadIdx.x] = B[i];
// Barrier: the partner's slot must be written before it is read below.
__syncthreads();
if (i & 1) {
b = storage[threadIdx.x ^ 1] * 2;
}
else {
b = storage[threadIdx.x ^ 1] * 3;
}
A[i] = b;
}
// Test driver: verifies the `sequence` kernel (A[i] == 2*i) and the
// shared-memory `testShr` kernel against host-computed expectations.
// Prints "Pass/Fail : Pass" on success; returns -1 on allocation/copy
// failure, 0 otherwise.
int main(int argc, char *arg[]) {
const int N = 32;
int *A_host, *A_gpu =0;
int errors = 0;
size_t bytes = sizeof(int)*N;
if (cudaMalloc((void **)&A_gpu, bytes) != cudaSuccess) {
printf("cudaMalloc() - failed to allocate %d bytes on device\n", (int)bytes);
return -1;
}
A_host = (int *)malloc(bytes);
// Poison the host buffer so a kernel that writes nothing is detected.
for (int i = 0; i < N; i++) {
A_host[i] = -1;
}
cudaMemcpy(A_gpu, A_host, bytes, cudaMemcpyHostToDevice);
dim3 grid((N+31)/32,1);
dim3 block(32, 1);
sequence<<< grid, block >>>(A_gpu, N);
cudaMemcpy(A_host, A_gpu, bytes, cudaMemcpyDeviceToHost);
// Check the sequence kernel: every element must equal 2*i.
for (int i = 0; i < N; i++) {
if (A_host[i] != 2*i) {
++errors;
}
}
int *B_gpu = 0;
if (cudaMalloc((void **)&B_gpu, bytes) != cudaSuccess) {
printf("cudaMalloc() - failed to allocate %d bytes on device\n", (int)bytes);
cudaFree(A_gpu);
free(A_host);
return -1;
}
// Regenerate the sequence, then run the shared-memory swap/scale kernel.
sequence<<< grid, block >>>(A_gpu, N);
testShr<<< grid, block >>>(B_gpu, A_gpu);
if (cudaMemcpy(A_host, B_gpu, bytes, cudaMemcpyDeviceToHost) != cudaSuccess) {
printf("cudaMemcpy(A, B) - failed to copy %d bytes from device to host\n", (int)bytes);
cudaFree(A_gpu);
cudaFree(B_gpu);
free(A_host);
// BUG FIX: previously fell through after freeing, so the loop below read
// freed memory and the trailing cudaFree/free calls double-freed all three
// buffers. Bail out once cleanup is done.
return -1;
}
// Host model of testShr: partner value is 2*(i^1); odd lanes scale by 2,
// even lanes by 3. Stop reporting after 5 mismatches.
for (int i = 0; (errors < 5) && i < N; ++i) {
int b;
if (i & 1) {
b = (i ^ 1) * 2 * 2;
}
else {
b = (i ^ 1) * 2 * 3;
}
int got = A_host[i];
if (b != got) {
printf("ERROR 1 [%d] - expected: %d, got: %d\n", i, b, got);
++errors;
}
}
cudaFree(B_gpu);
cudaFree(A_gpu);
free(A_host);
if (errors) {
printf("Pass/Fail : Fail\n");
}
else {
printf("Pass/Fail : Pass\n");
}
return 0;
}
|
d80c474c118c632abb465cd2552baef9ee4f168b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/filters.hpp"
namespace cv { namespace gpu { namespace device
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace gpu { namespace device
{
namespace surf
{
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
// Copies per-image SURF parameters (buffer capacities, image size, layer
// count, Hessian threshold) into device __constant__ memory.
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( hipMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( hipMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( hipMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( hipMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
// Copies per-octave parameters (octave index and layer dimensions) into
// device __constant__ memory before processing that octave.
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( hipMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( hipMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( hipMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
texture<unsigned char, 2, hipReadModeElementType> imgTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> sumTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> maskSumTex(0, hipFilterModePoint, hipAddressModeClamp);
// Binds the source image to the 2D image texture used by the SURF kernels.
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
// Binds the integral image to sumTex. Returns the bind offset converted
// from bytes to uint elements — presumably so callers can compensate when
// the base pointer is not texture-aligned (verify against call sites).
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>();
cudaSafeCall( hipBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
// Binds the mask's integral image to maskSumTex. Returns the bind offset
// converted from bytes to uint elements (same convention as bindSumTex).
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>();
cudaSafeCall( hipBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
// Evaluates an N-box Haar pattern at (x, y) using the integral image
// texture. Each `src` row is {x1, y1, x2, y2, weight} in the base filter's
// coordinates (oldSize); coordinates are rescaled by newSize/oldSize and
// each box sum is normalized by its area, so the response is size-invariant.
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
// sm_20+ accumulates in double for accuracy; older hardware uses float.
typedef double real_t;
#else
typedef float real_t;
#endif
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
// Box sum via the four corner taps of the integral image.
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
////////////////////////////////////////////////////////////////////////
// Hessian
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
// Returns the side length of the Haar filter for the given octave/layer:
// the base size grows linearly with the layer, then doubles per octave.
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
// Computes the approximated Hessian determinant and trace for every sample
// of every layer of the current octave. gridDim.y packs (c_nOctaveLayers+2)
// layers; results for layer L are stored at row offset L * c_layer_rows.
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
// Determine the indices
// Unpack the layer index and the within-layer block row from blockIdx.y.
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int i = threadIdx.y + blockIdx_y * blockDim.y;
const int layer = blockIdx_z;
const int size = calcSize(c_octave, layer);
// Number of valid sample positions at this octave's subsampling rate.
const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave;
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
{
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
// det = dx*dy - (0.9*dxy)^2 (0.81f), the SURF Hessian approximation.
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
// Host launcher: computes det/trace for all (nOctaveLayers + 2) layers of
// one octave with a single launch. The grid is sized for the smallest
// filter (largest sample count); the kernel masks out-of-range samples.
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers)
{
const int min_size = calcSize(octave, 0);
const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(max_samples_j, threads.x);
// grid.y packs all layers; the kernel unpacks layer from blockIdx.y.
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
hipLaunchKernelGGL(( icvCalcLayerDetAndTrace), dim3(grid), dim3(threads), 0, 0, det, trace);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
// Mask test pattern: a single 9x9 box given as (x1, y1, x2, y2, weight), scaled to the current wavelet size in WithMask::check.
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
// Predicate used during non-max suppression when a user mask is supplied:
// a candidate is accepted only if the mean mask value (read from the
// integral-mask texture maskSumTex) over the scaled c_DM box is >= 0.5.
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size)
{
float ratio = (float)size / 9.0f;
float d = 0;
// Scale the canonical 9x9 box corners to the current wavelet size.
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
// Box sum from the four corners of the integral mask.
float t = 0;
t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
// Normalize by the box area to get the mean mask value.
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
// Non-max suppression over the det scale-space: a candidate survives if it
// exceeds c_hessianThreshold, passes the Mask predicate, and is a strict
// maximum over its 26 neighbours in (x, y, scale). Surviving positions are
// appended to maxPosBuffer through an atomic counter. Each block stages a
// blockDim.x * blockDim.y tile of three adjacent layers in dynamic shared
// memory; blocks overlap by one pixel per side so interior threads always
// see a full 3x3 neighbourhood.
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
// Three stacked tiles: [0, zoff) layer below, [zoff, 2*zoff) current, rest above.
extern __shared__ float N9[];
// The hidx variables are the indices to the hessian buffer.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
// Only layers 1..c_nOctaveLayers: the outermost layers lack scale neighbours.
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
// Clamped loads keep out-of-range threads valid; they are masked out below.
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
__syncthreads();
// Border threads of the block only supply neighbour data; interior threads test.
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to simplify this
// (cancel out sampleStep) without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
// Append the candidate; overflow beyond c_max_candidates is dropped.
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
// Sign of the trace (Laplacian) is stored for faster matching later.
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
#endif
}
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
    int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
    // Host-side launcher for icvFindMaximaInLayer. Dimensions of one layer
    // in this octave's coordinate system:
    const int layer_rows = img_rows >> octave;
    const int layer_cols = img_cols >> octave;

    // The widest margin over all processed layers comes from the layer above
    // the topmost one (layer index 2 relative to the first processed layer).
    const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;

    const dim3 block(16, 16);
    dim3 grid;
    // Blocks overlap by one pixel on each side (hence block - 2); the layer
    // index is folded into grid.y.
    grid.x = divUp(layer_cols - 2 * min_margin, block.x - 2);
    grid.y = divUp(layer_rows - 2 * min_margin, block.y - 2) * nOctaveLayers;

    // Three det layers are staged per block in dynamic shared memory.
    const size_t smem_size = block.x * block.y * 3 * sizeof(float);

    if (use_mask)
        hipLaunchKernelGGL(( icvFindMaximaInLayer<WithMask>), dim3(grid), dim3(block), smem_size, 0, det, trace, maxPosBuffer, maxCounter);
    else
        hipLaunchKernelGGL(( icvFindMaximaInLayer<WithOutMask>), dim3(grid), dim3(block), smem_size, 0, det, trace, maxPosBuffer, maxCounter);

    cudaSafeCall( hipGetLastError() );

    cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
// Sub-pixel / sub-scale refinement of each detected maximum. One 3x3x3 thread
// block per candidate loads the 27 det values around it into shared memory;
// thread (0,0,0) then builds the gradient dD and Hessian H of the response and
// solves H * x = dD. If the interpolation offset x stays inside the unit cube,
// a refined feature (position, size, laplacian sign, hessian response) is
// appended through an atomic counter.
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
const int4 maxPos = maxPosBuffer[blockIdx.x];
// Each thread loads one of the 27 neighbours around the candidate.
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
// Central-difference gradient of the det response at the candidate.
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
// Second-derivative (Hessian) matrix of the response, symmetric by construction.
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
// Map the layer-grid position back to full-resolution image coordinates.
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
// Interpolated size: step x[2] along the per-layer size increment ds.
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
#endif
}
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
    float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
    unsigned int* featureCounter)
{
    // Host-side launcher: one 3x3x3 thread block per detected maximum so each
    // thread of a block loads one neighbour of the candidate.
    const dim3 block(3, 3, 3);

    dim3 grid;
    grid.x = maxCounter;

    hipLaunchKernelGGL(( icvInterpolateKeypoint), dim3(grid), dim3(block), 0, 0, det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
    cudaSafeCall( hipGetLastError() );

    cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
// Orientation estimation parameters: sliding-window step (degrees), window
// width (degrees), and number of circular sample points around the keypoint.
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
// Sample offsets (in units of the keypoint scale s) covering a disc of radius 6.
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 
0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
// Haar patterns (x1, y1, x2, y2, weight) of the 4x4 gradient wavelets used for orientation: dx (c_NX) and dy (c_NY).
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
// Dominant-orientation estimation, one 32x4 block per feature. Haar gradient
// responses (dx, dy) are sampled at ORI_SAMPLES points in a disc of radius 6s
// around the keypoint; then a 60-degree window is swept in ORI_SEARCH_INC-degree
// steps (4 candidate directions per loop iteration, one per threadIdx.y) and the
// window with the largest summed gradient vector defines the keypoint direction.
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
// Per-row scratch for the 32-lane window-sum reductions.
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
// Threads beyond ORI_SAMPLES (and out-of-image samples) keep zero responses.
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
// Gaussian-weighted Haar responses; angle converted to degrees in [0, 360).
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
#if __CUDA_ARCH__ >= 200
#pragma unroll
#endif
// 18 iterations * 4 rows = 72 window directions (360 / ORI_SEARCH_INC).
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
float sumx = 0.0f, sumy = 0.0f;
// Each of the 32 lanes accumulates 4 strided samples that fall in the window
// (the comparison wraps around 360 degrees).
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
// Sum (sumx, sumy) across the 32 lanes of this row.
plus<float> op;
device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
// Track the window with the largest squared gradient magnitude.
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
// Publish each row's best window; s_angle is reused to hold the best modulus.
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0)
{
// Pick the best of the 4 row winners and convert to the output convention.
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES

// Host-side launcher for icvCalcOrientation: one 32x4 block per feature
// (4 candidate windows are scanned per loop iteration inside the kernel).
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
    const dim3 block(32, 4);

    dim3 grid;
    grid.x = nFeatures;

    hipLaunchKernelGGL(( icvCalcOrientation), dim3(grid), dim3(block), 0, 0, featureX, featureY, featureSize, featureDir);
    cudaSafeCall( hipGetLastError() );

    cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
// Samples the oriented square window around a keypoint: maps window
// coordinates (i, j) through the rotation (cos_dir, sin_dir) about
// (centerX, centerY) and reads the pixel from the image texture imgTex.
struct WinReader
{
typedef uchar elem_type;
__device__ __forceinline__ uchar operator ()(int i, int j) const
{
float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
return tex2D(imgTex, pixel_x, pixel_y);
}
float centerX;      // keypoint position in image coordinates
float centerY;
float win_offset;   // offset of the window's top-left corner from the centre
float cos_dir;      // cos/sin of the descriptor orientation
float sin_dir;
int width;          // window extent (set by the caller)
int height;
};
// Computes one (dx, dy) Haar response pair of the SURF descriptor patch for
// the calling thread. The rotated window around the keypoint is resampled
// into a shared (PATCH_SZ+1)^2 patch, then each thread of rows threadIdx.y
// (mapped to one of the 16 5x5 sub-squares) differentiates its 2x2 cell,
// weighted by the Gaussian table c_DW.
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy);
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy)
{
__shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
dx = dy = 0.0f;
WinReader win;
win.centerX = featureX[blockIdx.x];
win.centerY = featureY[blockIdx.x];
// The sampling intervals and wavelet sized for selecting an orientation
// and building the keypoint descriptor are defined relative to 's'
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
// Extract a window of pixels around the keypoint of size 20s
const int win_size = (int)((PATCH_SZ + 1) * s);
win.width = win.height = win_size;
// Nearest neighbour version (faster)
win.win_offset = -(win_size - 1.0f) / 2.0f;
// Undo the clockwise convention applied when the orientation was stored.
float descriptor_dir = 360.0f - featureDir[blockIdx.x];
if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
descriptor_dir = 0.f;
descriptor_dir *= CV_PI_F / 180.0f;
sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
// Cooperative load: each eligible thread fills one patch sample, using an
// area filter when downscaling (s > 1) and bilinear interpolation otherwise.
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int xLoadInd = tid % (PATCH_SZ + 1);
const int yLoadInd = tid / (PATCH_SZ + 1);
if (yLoadInd < (PATCH_SZ + 1))
{
if (s > 1)
{
AreaFilter<WinReader> filter(win, s, s);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
}
else
{
LinearFilter<WinReader> filter(win);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
}
}
__syncthreads();
// Map thread -> (sub-square, cell) and compute the weighted 2x2 differences.
const int xPatchInd = threadIdx.x % 5;
const int yPatchInd = threadIdx.x / 5;
if (yPatchInd < 5)
{
const int xBlockInd = threadIdx.y % 4;
const int yBlockInd = threadIdx.y / 4;
const int xInd = xBlockInd * 5 + xPatchInd;
const int yInd = yBlockInd * 5 + yPatchInd;
const float dw = c_DW[yInd * PATCH_SZ + xInd];
dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw;
dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw;
}
}
// 64-element SURF descriptor, one 32x16 block per feature: each threadIdx.y
// row handles one 5x5 sub-square of the 20x20 patch (via calc_dx_dy) and the
// 32 lanes of the row reduce (dx, dy, |dx|, |dy|) into one float4 of the
// descriptor row.
__global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// Per-row scratch for the 32-lane reductions.
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float dxabs = ::fabsf(dx);
float dyabs = ::fabsf(dy);
plus<float> op;
reduce<32>(sRow, dx, threadIdx.x, op);
reduce<32>(sRow, dy, threadIdx.x, op);
reduce<32>(sRow, dxabs, threadIdx.x, op);
reduce<32>(sRow, dyabs, threadIdx.x, op);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
// write dx, dy, |dx|, |dy|
if (threadIdx.x == 0)
*descriptors_block = make_float4(dx, dy, dxabs, dyabs);
}
// 128-element (extended) SURF descriptor, one 32x16 block per feature: like
// compute_descriptors_64 but the dx sums are split by the sign of dy and the
// dy sums by the sign of dx, yielding two float4 entries per sub-square.
__global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// Per-row scratch for the 32-lane reductions.
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
plus<float> op;
// Route dx into the dy >= 0 or dy < 0 bin before reducing.
float d1 = 0.0f;
float d2 = 0.0f;
float abs1 = 0.0f;
float abs2 = 0.0f;
if (dy >= 0)
{
d1 = dx;
abs1 = ::fabsf(dx);
}
else
{
d2 = dx;
abs2 = ::fabsf(dx);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (threadIdx.x == 0)
descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
// Route dy into the dx >= 0 or dx < 0 bin and reduce again.
if (dx >= 0)
{
d1 = dy;
abs1 = ::fabsf(dy);
d2 = 0.0f;
abs2 = 0.0f;
}
else
{
d1 = 0.0f;
abs1 = 0.0f;
d2 = dy;
abs2 = ::fabsf(dy);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (threadIdx.x == 0)
descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
}
// Normalizes each descriptor to unit Euclidean length (one block per feature,
// BLOCK_DIM_X == descriptor length) so descriptor distances are comparable.
// Fix: guard the division — an all-zero descriptor gave s_len == 0 and the
// original 'val / s_len' wrote NaN (0/0) into every element; such a descriptor
// is now left as zeros instead.
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
__shared__ float smem[BLOCK_DIM_X];
__shared__ float s_len;
// no need for thread ID
float* descriptor_base = descriptors.ptr(blockIdx.x);
// read in the unnormalized descriptor values (squared)
const float val = descriptor_base[threadIdx.x];
float len = val * val;
reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>());
if (threadIdx.x == 0)
s_len = ::sqrtf(len);
__syncthreads();
// normalize and store in output; skip degenerate (all-zero) descriptors
if (s_len > 0.0f)
descriptor_base[threadIdx.x] = val / s_len;
}
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
    // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D
    const bool extended = (descriptors.cols != 64);

    // Pass 1: one 32x16 block per feature fills the raw descriptor entries.
    if (!extended)
    {
        hipLaunchKernelGGL(( compute_descriptors_64), dim3(nFeatures), dim3(dim3(32, 16)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
    }
    else
    {
        hipLaunchKernelGGL(( compute_descriptors_128), dim3(nFeatures), dim3(dim3(32, 16)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
    }
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );

    // Pass 2: normalize each descriptor row to unit length.
    if (!extended)
    {
        hipLaunchKernelGGL(( normalize_descriptors<64>), dim3(nFeatures), dim3(64), 0, 0, (PtrStepSzf) descriptors);
    }
    else
    {
        hipLaunchKernelGGL(( normalize_descriptors<128>), dim3(nFeatures), dim3(128), 0, 0, (PtrStepSzf) descriptors);
    }
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}
} // namespace surf
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
| d80c474c118c632abb465cd2552baef9ee4f168b.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/filters.hpp"
// Forward declarations of the host-side SURF GPU entry points implemented
// later in this file. These form the interface consumed by the OpenCV GPU
// SURF detector/extractor.
namespace cv { namespace gpu { namespace device
{
namespace surf
{
// Upload per-image parameters into __constant__ memory (must precede kernels).
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
// Upload per-octave parameters into __constant__ memory.
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
// Texture binding for the source image and the (mask) integral images.
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
// Hessian determinant / trace computation for one octave.
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
// 3x3x3 non-maximum suppression over the scale-space layers.
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
// Sub-pixel / sub-scale keypoint refinement.
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
// Dominant orientation assignment per keypoint.
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
// Descriptor computation (64- or 128-element, selected by descriptors.cols).
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace gpu { namespace device
{
namespace surf
{
////////////////////////////////////////////////////////////////////////
// Global parameters
//
// These __constant__ symbols are written from the host by
// loadGlobalConstants / loadOctaveConstants below and read by every kernel
// in this namespace.
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
// Uploads the per-image SURF parameters into device __constant__ memory.
// Must be called before any of the detection kernels below run; they read
// the corresponding c_* symbols. Each copy is error-checked.
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
// Uploads the parameters of the octave about to be processed (octave index
// and layer dimensions) into __constant__ memory. Called once per octave,
// before the det/trace, maxima and interpolation kernels for that octave.
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
//
// Texture references used by the kernels: the raw image, its integral image
// (sumTex) and the mask's integral image (maskSumTex). Point filtering with
// clamped addressing — the kernels do exact integer lookups.
texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
// Binds the source image to imgTex.
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
// Binds the integral image to sumTex. Returns the binding offset in
// elements (cudaBindTexture2D may bind at an aligned address before the
// buffer start); callers must add it to their x coordinates if nonzero.
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
// Binds the mask's integral image to maskSumTex; same offset semantics as
// bindSumTex above.
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
// Evaluates an N-box Haar pattern at (x, y) using the integral image bound
// to sumTex. Each src row is {x1, y1, x2, y2, weight} in coordinates of the
// oldSize x oldSize base pattern, rescaled to newSize. Every box sum is
// normalized by its area, so the result is scale-invariant.
// Accumulates in double on SM20+ (where fast doubles exist) for precision.
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
typedef double real_t;
#else
typedef float real_t;
#endif
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
// Scale the box corners from base-pattern to actual coordinates.
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
// Standard 4-corner integral-image box sum.
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
// Weight the box and normalize by its area.
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
////////////////////////////////////////////////////////////////////////
// Hessian
//
// Haar box patterns for the second-order derivatives Dxx, Dyy and Dxy on a
// 9x9 base kernel. Row format is {x1, y1, x2, y2, weight}, consumed by
// icvCalcHaarPatternSum above.
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
// Size of the Haar wavelet used at a given (octave, layer): 9x9 for the
// first layer of the first octave, growing by 6 per layer — the increment
// is even so all sizes within an octave share parity, which keeps layer
// neighbourhoods aligned during non-maximum suppression — and doubling per
// octave.
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
    const int HAAR_SIZE0    = 9; // wavelet size at layer 0 of octave 0
    const int HAAR_SIZE_INC = 6; // per-layer increment (must stay even)
    const int layerSize = HAAR_SIZE0 + HAAR_SIZE_INC * layer;
    return layerSize << octave;
}
// Computes the approximated Hessian determinant and trace for every sample
// of every layer of the current octave. The launch grid folds the layer
// index into gridDim.y (see icvCalcLayerDetAndTrace_gpu); results for layer
// L are stored at row offset L * c_layer_rows in det/trace.
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int i = threadIdx.y + blockIdx_y * blockDim.y;
const int layer = blockIdx_z;
const int size = calcSize(c_octave, layer);
// Number of valid sample positions for this layer's kernel size.
const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave;
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
{
// Second-order Haar responses (Dxx, Dyy, Dxy) via the integral image.
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
// SURF's Hessian approximation: det = DxxDyy - (0.9 Dxy)^2; 0.81 = 0.9^2.
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
// Launches icvCalcLayerDetAndTrace for all nOctaveLayers + 2 layers of one
// octave. The grid is sized for the smallest (layer-0) kernel — larger
// layers fit inside it and the kernel bounds-checks per layer — and the
// layer index is folded into gridDim.y.
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
    int octave, int nOctaveLayers)
{
    const int smallest_size = calcSize(octave, 0);
    const int samples_i = 1 + ((img_rows - smallest_size) >> octave);
    const int samples_j = 1 + ((img_cols - smallest_size) >> octave);

    const dim3 block(16, 16);
    const dim3 grid(divUp(samples_j, block.x),
                    divUp(samples_i, block.y) * (nOctaveLayers + 2));

    icvCalcLayerDetAndTrace<<<grid, block>>>(det, trace);
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
// Single 9x9 box pattern ({x1, y1, x2, y2, weight}) used to average the
// mask over a candidate's support region.
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
// Mask policy used by icvFindMaximaInLayer when a feature mask is supplied:
// a candidate passes if the mean mask value over its (scaled) support box,
// read from the mask's integral image, is at least 0.5.
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size)
{
float ratio = (float)size / 9.0f;
float d = 0;
// Scale the base box to the candidate's kernel size.
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
// 4-corner integral-image box sum over the mask.
float t = 0;
t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
// Area-normalized mean mask value.
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
// Finds candidate keypoints: determinant values above c_hessianThreshold
// that are strict maxima over their 3x3x3 scale-space neighbourhood.
// Mask is WithMask or WithOutMask. Each block stages a 3-layer tile of the
// determinant buffer in dynamic shared memory (3 * blockDim.x * blockDim.y
// floats, see the launcher); blocks overlap by 2 in x and y so interior
// threads have a full neighbourhood. Candidates are appended to
// maxPosBuffer via an atomic counter. Requires SM11+ (atomics).
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
extern __shared__ float N9[];
// The hidx variables are the indices to the hessian buffer.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
// Layers 0 and nOctaveLayers+1 are border layers; only interior layers
// can host a maximum, hence the +1.
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
// blockDim - 2 stride: each block recomputes a 1-pixel halo.
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
// Load layer-1, layer, layer+1 tiles; coordinates clamped to the image.
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
// All loads must complete before any thread reads its neighbourhood.
__syncthreads();
// Only interior (non-halo) threads inside the valid region test for maxima.
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to simplify this
// (cancel out sampleStep) without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
// Append (j, i, layer, sign of the Laplacian) to the candidate
// buffer; entries past c_max_candidates are dropped.
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
#endif
}
// Launches icvFindMaximaInLayer over the nOctaveLayers interior layers of
// one octave, dispatching on whether a feature mask is in use. Blocks
// overlap by 2 in x and y (halo recomputation), and each block needs a
// 3-layer shared-memory tile — hence the threads-2 grid divisor and the
// 3 * blockDim floats of dynamic shared memory.
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
// Widest margin over the processed layers (layer index 2 = largest interior + 1).
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
if (use_mask)
icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
else
icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
// Refines one candidate maximum per block to sub-pixel / sub-scale accuracy
// by fitting a 3D quadratic to the 3x3x3 determinant neighbourhood and
// solving H * x = dD. Launched with 3x3x3 threads per block (one per
// neighbourhood cell); thread (0,0,0) performs the solve and, on success,
// appends the refined feature to the output arrays via an atomic counter.
// Requires SM11+ (atomics).
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
const int4 maxPos = maxPosBuffer[blockIdx.x];
// Each thread loads one cell of the 3x3x3 neighbourhood around the maximum.
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
// All 27 loads must land before thread (0,0,0) reads the neighbourhood.
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
// Negated first derivatives (central differences) — RHS of H * x = dD.
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
// Second-derivative (Hessian) matrix of the determinant response.
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
// Center of the wavelet support in full-resolution image coordinates.
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
// Apply the sub-pixel offset (scaled back to full resolution).
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
// Sub-scale interpolation between this layer and the one below.
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
#endif
}
// Launches icvInterpolateKeypoint: one 3x3x3-thread block per candidate
// maximum (one thread per cell of the determinant neighbourhood).
// Refined features are appended to the output arrays through featureCounter.
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
    float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
    unsigned int* featureCounter)
{
    const dim3 block(3, 3, 3);
    const dim3 grid(maxCounter);

    icvInterpolateKeypoint<<<grid, block>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 
0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
// Assigns the dominant orientation to one keypoint per block (32x4 threads,
// see icvCalcOrientation_gpu). Phase 1: the first ORI_SAMPLES threads
// compute Gaussian-weighted Haar responses (X, Y) and their angle at sample
// points around the keypoint. Phase 2: sliding ORI_WIN-degree windows in
// ORI_SEARCH_INC-degree steps — each of the 4 thread rows scans every 4th
// window — sum the responses inside each window; the window with the
// largest |sum|^2 defines the orientation. Result is written in degrees,
// flipped to OpenCV's clockwise convention.
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
// Per-row reduction scratch for the window sums.
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
// Only samples whose wavelet fits fully inside the image contribute.
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
// Gradient angle, mapped to [0, 360) degrees.
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
#if __CUDA_ARCH__ >= 200
#pragma unroll
#endif
// 18 iterations * 4 rows = 72 window start angles (360 / ORI_SEARCH_INC).
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
// Each lane accumulates its 4 samples (tid, tid+32, tid+64, tid+96)
// that fall inside the window centered at 'dir' (wrap-around included).
float sumx = 0.0f, sumy = 0.0f;
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
// Row-wide reduction of (sumx, sumy); result valid in lane 0.
plus<float> op;
device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
// Track the strongest window seen by this row.
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
// Each row publishes its best window; s_angle is reused for the magnitude.
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
// Thread (0,0) picks the overall winner across the 4 rows.
if (threadIdx.x == 0 && threadIdx.y == 0)
{
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
// Flip to OpenCV's clockwise angle convention; snap ~360 to 0.
kp_dir = 360.0f - kp_dir;
if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
// Launches icvCalcOrientation: one 32x4 block per feature — 32 lanes
// reduce the Haar-response samples while the 4 rows scan interleaved
// orientation windows in parallel.
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
    const dim3 block(32, 4);
    const dim3 grid(nFeatures);

    icvCalcOrientation<<<grid, block>>>(featureX, featureY, featureSize, featureDir);
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
// Samples the source image in a coordinate frame rotated by the keypoint
// orientation: operator()(i, j) maps patch coordinates (row i, column j)
// into image space via the (cos_dir, sin_dir) rotation around
// (centerX, centerY) and fetches the pixel through the bound texture imgTex.
struct WinReader
{
typedef uchar elem_type;
// i: patch row, j: patch column. win_offset shifts the patch so that
// (0, 0) addresses its top-left corner instead of its centre.
__device__ __forceinline__ uchar operator ()(int i, int j) const
{
float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
return tex2D(imgTex, pixel_x, pixel_y);
}
float centerX;    // keypoint centre, image coordinates
float centerY;
float win_offset; // -(win_size - 1) / 2 (see calc_dx_dy)
float cos_dir;    // cos/sin of the descriptor orientation
float sin_dir;
int width;        // sampling window extent
int height;
};
// Forward declaration.
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy);
// Computes the weighted Haar-wavelet responses (dx, dy) of one patch sample
// for the feature indexed by blockIdx.x. Launched as part of a 32x16 thread
// block (see compute_descriptors_gpu) and runs in two phases:
//  phase 1: the first (PATCH_SZ+1)^2 threads load the rotated, scaled patch
//           around the keypoint into shared memory;
//  phase 2: threads are re-mapped so threadIdx.y selects one of 4x4
//           sub-regions and threadIdx.x one of its 5x5 samples; lanes with
//           threadIdx.x >= 25 keep dx = dy = 0.
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy)
{
__shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
dx = dy = 0.0f;
WinReader win;
win.centerX = featureX[blockIdx.x];
win.centerY = featureY[blockIdx.x];
// The sampling intervals and wavelet sized for selecting an orientation
// and building the keypoint descriptor are defined relative to 's'
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
// Extract a window of pixels around the keypoint of size 20s
const int win_size = (int)((PATCH_SZ + 1) * s);
win.width = win.height = win_size;
// Nearest neighbour version (faster)
win.win_offset = -(win_size - 1.0f) / 2.0f;
// The descriptor is built against the negated keypoint orientation;
// values within epsilon of 360 degrees wrap to 0 before the radian convert.
float descriptor_dir = 360.0f - featureDir[blockIdx.x];
if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
descriptor_dir = 0.f;
descriptor_dir *= CV_PI_F / 180.0f;
sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
// Phase 1: cooperative load of the (PATCH_SZ+1)^2 patch into shared memory.
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int xLoadInd = tid % (PATCH_SZ + 1);
const int yLoadInd = tid / (PATCH_SZ + 1);
if (yLoadInd < (PATCH_SZ + 1))
{
if (s > 1)
{
// scale > 1: sample through AreaFilter (defined elsewhere —
// presumably area averaging for down-scaling; TODO confirm)
AreaFilter<WinReader> filter(win, s, s);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
}
else
{
// scale <= 1: sample through LinearFilter (defined elsewhere)
LinearFilter<WinReader> filter(win);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
}
}
__syncthreads();
// Phase 2: 4x4 sub-regions (threadIdx.y) of 5x5 samples (threadIdx.x < 25).
const int xPatchInd = threadIdx.x % 5;
const int yPatchInd = threadIdx.x / 5;
if (yPatchInd < 5)
{
const int xBlockInd = threadIdx.y % 4;
const int yBlockInd = threadIdx.y / 4;
const int xInd = xBlockInd * 5 + xPatchInd;
const int yInd = yBlockInd * 5 + yPatchInd;
// per-sample weight from the constant table c_DW (presumably Gaussian)
const float dw = c_DW[yInd * PATCH_SZ + xInd];
// 2x2 Haar responses on the shared patch, scaled by the weight
dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw;
dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw;
}
}
// Builds the 64-element SURF descriptor for one feature per block:
// each of the 16 threadIdx.y rows reduces its sub-region's 32 lane values
// and writes one float4 (dx, dy, |dx|, |dy|), i.e. 16 * 4 = 64 floats.
__global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// one 32-float scratch row per threadIdx.y for the lane-wide reductions
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float dxabs = ::fabsf(dx);
float dyabs = ::fabsf(dy);
plus<float> op;
// NOTE(review): the back-to-back reductions reuse sRow without an explicit
// barrier — this relies on reduce<32> synchronizing internally for 32
// lanes; confirm against the reduce helper's implementation.
reduce<32>(sRow, dx, threadIdx.x, op);
reduce<32>(sRow, dy, threadIdx.x, op);
reduce<32>(sRow, dxabs, threadIdx.x, op);
reduce<32>(sRow, dyabs, threadIdx.x, op);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
// write dx, dy, |dx|, |dy|
if (threadIdx.x == 0)
*descriptors_block = make_float4(dx, dy, dxabs, dyabs);
}
// Builds the extended 128-element SURF descriptor for one feature per block:
// the dx responses are split by the sign of dy (and dy by the sign of dx),
// giving 8 sums per sub-region — two float4 slots per threadIdx.y row,
// i.e. 16 * 2 * 4 = 128 floats.
__global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// one 32-float scratch row per threadIdx.y for the lane-wide reductions
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
// two float4 output slots per sub-region
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
plus<float> op;
// split dx by the sign of dy
float d1 = 0.0f;
float d2 = 0.0f;
float abs1 = 0.0f;
float abs2 = 0.0f;
if (dy >= 0)
{
d1 = dx;
abs1 = ::fabsf(dx);
}
else
{
d2 = dx;
abs2 = ::fabsf(dx);
}
// NOTE(review): successive reductions reuse sRow without an explicit
// barrier — relies on reduce<32>'s internal synchronization.
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (threadIdx.x == 0)
descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
// split dy by the sign of dx
if (dx >= 0)
{
d1 = dy;
abs1 = ::fabsf(dy);
d2 = 0.0f;
abs2 = 0.0f;
}
else
{
d1 = 0.0f;
abs1 = 0.0f;
d2 = dy;
abs2 = ::fabsf(dy);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (threadIdx.x == 0)
descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
}
// Normalizes each descriptor row to unit Euclidean length.
// One block per descriptor (blockIdx.x), BLOCK_DIM_X threads — one per
// descriptor element.
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
    __shared__ float s_partial[BLOCK_DIM_X];
    __shared__ float s_norm;

    // each thread owns one element of this block's descriptor row
    float* row = descriptors.ptr(blockIdx.x);
    const float elem = row[threadIdx.x];

    // block-wide sum of squares, then the Euclidean length
    float sq = elem * elem;
    reduce<BLOCK_DIM_X>(s_partial, sq, threadIdx.x, plus<float>());
    if (threadIdx.x == 0)
        s_norm = ::sqrtf(sq);
    __syncthreads();

    // scale every element by the length and store the result in place
    row[threadIdx.x] = elem / s_norm;
}
// Host entry point: computes unnormalized SURF descriptors (one 32x16
// thread block per feature) and then normalizes each descriptor to unit
// length in a second launch. descriptors.cols selects the 64- or
// 128-element variant.
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
    const dim3 threads(32, 16);

    if (descriptors.cols == 64)
    {
        compute_descriptors_64<<<nFeatures, threads>>>(descriptors, featureX, featureY, featureSize, featureDir);
        cudaSafeCall( cudaGetLastError() );
        cudaSafeCall( cudaDeviceSynchronize() );

        normalize_descriptors<64><<<nFeatures, 64>>>((PtrStepSzf) descriptors);
    }
    else
    {
        compute_descriptors_128<<<nFeatures, threads>>>(descriptors, featureX, featureY, featureSize, featureDir);
        cudaSafeCall( cudaGetLastError() );
        cudaSafeCall( cudaDeviceSynchronize() );

        normalize_descriptors<128><<<nFeatures, 128>>>((PtrStepSzf) descriptors);
    }

    // check + drain the normalization pass before returning
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaDeviceSynchronize() );
}
} // namespace surf
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
075abc90476435e8d42f28706098c6c534bb8b65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Driver program for a CUDA-based A5/1 rainbow table generator.
*
* Copyright (C) 2009: Ingo Albrecht <prom@berlin.ccc.de>
*/
#ifndef TEST_INTERMEDIATES
/* values below are for normal runs */
/*
* These values are appropriate for a Quadro FX 570M.
*
* Before running this on different hardware, you
* should decrease OPERATIONS_PER_RUN and then
* increase it incrementally until you get
* run lengths approaching 5 seconds.
*
* Thread and block count should be selected
* so that they almost hit the register bound.
*
* If you want to tune the code for your card,
* you should do it incrementally, keeping
* the run length below 5 seconds, or your
* graphics subsystem might go wonky.
*/
// number of threads per block
#define NUM_THREADS 32
// number of blocks to schedule
#define NUM_BLOCKS 32
// how long each run should be in cycles.
// must be a power of two for now.
#define OPERATIONS_PER_RUN 32768
#else
// values below are for intermediate testing
#define NUM_THREADS 10
#define NUM_BLOCKS 1
#define OPERATIONS_PER_RUN 32768
#endif
// total operations per chain (2^21)
#define OPERATIONS_PER_CHAIN 2097152
// number of chains to be computed.
// Parenthesized so uses such as `x / NUM_CHAINS` expand with the intended
// precedence (the unparenthesized form would expand to `x / A * B`).
#define NUM_CHAINS (NUM_THREADS * NUM_BLOCKS)
#include <stdio.h>
#include <unistd.h>
#include <cutil.h>
#include "calculate_chain_kernel.cu"
/*
 * Computes NUM_CHAINS rainbow chains on the GPU. The work is split into
 * num_runs kernel launches of OPERATIONS_PER_RUN steps each so that no
 * single launch runs long enough to stall the graphics subsystem (see the
 * tuning notes above). Chain i starts at `start + i`; per-run and total
 * timings are reported on stdout.
 */
int
main(int argc, char **argv) {
    CUT_DEVICE_INIT(argc, argv);

    uint32 i;
    uint64 start = 0; // XXX put your start vector here

    printf("Computing %d chains divided into %d blocks of %d threads, starting at 0x%16.16llx\n",
           NUM_CHAINS, NUM_BLOCKS, NUM_THREADS, start);

    uint32 num_runs = OPERATIONS_PER_CHAIN / OPERATIONS_PER_RUN;
    printf("Will execute %d runs of %d steps each.\n", num_runs, OPERATIONS_PER_RUN);

    // create a timer for the whole run
    unsigned int total_timer = 0;
    CUT_SAFE_CALL(cutCreateTimer(&total_timer));

    // compute size of state
    uint32 s_results = NUM_CHAINS * sizeof(uint64);

    // allocate and initialize host memory: chain i starts at start + i
    uint64* h_results = (uint64*) calloc(1, s_results);
    if (!h_results) {
        fprintf(stderr, "Failed to allocate %u bytes of host memory\n", s_results);
        return 1;
    }
    for(i = 0; i < NUM_CHAINS; i++) {
        h_results[i] = start + i;
    }

    // allocate and initialize device memory
    uint64* d_results;
    CUDA_SAFE_CALL(hipMalloc((void**)&d_results, s_results));

    CUT_SAFE_CALL(cutStartTimer(total_timer));
    CUDA_SAFE_CALL(hipMemcpy(d_results, h_results, s_results, hipMemcpyHostToDevice));

    double total_run_time = 0.0;
    uint32 run;
    for(run = 0; run < num_runs; run++) {
        unsigned int run_timer = 0;
        CUT_SAFE_CALL(cutCreateTimer(&run_timer));

        // chains are advanced from the highest step index downwards
        uint32 index = OPERATIONS_PER_CHAIN - 1 - run * OPERATIONS_PER_RUN;

#ifdef TEST_INTERMEDIATES
        // print intermediates (for testing against calculate_chains_dump)
        for(i = 0; i < NUM_CHAINS; i++) {
            printf("results[%d] = 0x%16.16llx\n", i, h_results[i]);
        }
#endif

        printf("Run %3.3d/%3.3d, starting at index 0x%6.6x... ", run+1, num_runs, index);
        fflush(stdout);
        // give the display a chance to breathe between long kernel runs
        usleep(500*1000);

        CUT_SAFE_CALL(cutStartTimer(run_timer));

#ifdef TEST_INTERMEDIATES
        CUDA_SAFE_CALL(hipMemcpy(d_results, h_results, s_results, hipMemcpyHostToDevice));
#endif

        dim3 gridDims(NUM_BLOCKS, 1, 1);
        dim3 blockDims(NUM_THREADS, 1, 1);
        hipLaunchKernelGGL(( crunch), dim3(gridDims), dim3(blockDims), 0, 0, d_results, index);
        CUDA_SAFE_CALL(hipDeviceSynchronize());

#ifdef TEST_INTERMEDIATES
        CUDA_SAFE_CALL(hipMemcpy(h_results, d_results, s_results, hipMemcpyDeviceToHost));
#endif

        CUT_SAFE_CALL(cutStopTimer(run_timer));
        float run_time = cutGetTimerValue(run_timer);
        printf("%f ms.\n", run_time);
        total_run_time += run_time;
        fflush(stdout);
        CUT_SAFE_CALL(cutDeleteTimer(run_timer));
    }

    CUDA_SAFE_CALL(hipMemcpy(h_results, d_results, s_results, hipMemcpyDeviceToHost));
    CUT_SAFE_CALL(cutStopTimer(total_timer));

    // free device memory (hipFree takes the device pointer itself;
    // the original cast it to void**, which was misleading)
    CUDA_SAFE_CALL(hipFree(d_results));

    // print results
    for(i = 0; i < NUM_CHAINS; i++) {
        printf("results[%d] = 0x%16.16llx\n", i, h_results[i]);
    }

    // free host memory
    free(h_results);

    // report total time
    printf("Total time: %f ms, %f spent crunching\n", cutGetTimerValue(total_timer), total_run_time);

    // delete the whole-run timer
    CUT_SAFE_CALL(cutDeleteTimer(total_timer));

    return 0;
}
| 075abc90476435e8d42f28706098c6c534bb8b65.cu | /*
* Driver program for a CUDA-based A5/1 rainbow table generator.
*
* Copyright (C) 2009: Ingo Albrecht <prom@berlin.ccc.de>
*/
#ifndef TEST_INTERMEDIATES
/* values below are for normal runs */
/*
* These values are appropriate for a Quadro FX 570M.
*
* Before running this on different hardware, you
* should decrease OPERATIONS_PER_RUN and then
* increase it incrementally until you get
* run lengths approaching 5 seconds.
*
* Thread and block count should be selected
* so that they almost hit the register bound.
*
* If you want to tune the code for your card,
* you should do it incrementally, keeping
* the run length below 5 seconds, or your
* graphics subsystem might go wonky.
*/
// number of threads per block
#define NUM_THREADS 32
// number of blocks to schedule
#define NUM_BLOCKS 32
// how long each run should be in cycles.
// must be a power of two for now.
#define OPERATIONS_PER_RUN 32768
#else
// values below are for intermediate testing
#define NUM_THREADS 10
#define NUM_BLOCKS 1
#define OPERATIONS_PER_RUN 32768
#endif
// total operations per chain (2^21)
#define OPERATIONS_PER_CHAIN 2097152
// number of chains to be computed.
// Parenthesized so uses such as `x / NUM_CHAINS` expand with the intended
// precedence (the unparenthesized form would expand to `x / A * B`).
#define NUM_CHAINS (NUM_THREADS * NUM_BLOCKS)
#include <stdio.h>
#include <unistd.h>
#include <cutil.h>
#include "calculate_chain_kernel.cu"
/*
 * Computes NUM_CHAINS rainbow chains on the GPU. The work is split into
 * num_runs kernel launches of OPERATIONS_PER_RUN steps each so that no
 * single launch runs long enough to stall the graphics subsystem (see the
 * tuning notes above). Chain i starts at `start + i`; per-run and total
 * timings are reported on stdout.
 */
int
main(int argc, char **argv) {
    CUT_DEVICE_INIT(argc, argv);

    uint32 i;
    uint64 start = 0; // XXX put your start vector here

    printf("Computing %d chains divided into %d blocks of %d threads, starting at 0x%16.16llx\n",
           NUM_CHAINS, NUM_BLOCKS, NUM_THREADS, start);

    uint32 num_runs = OPERATIONS_PER_CHAIN / OPERATIONS_PER_RUN;
    printf("Will execute %d runs of %d steps each.\n", num_runs, OPERATIONS_PER_RUN);

    // create a timer for the whole run
    unsigned int total_timer = 0;
    CUT_SAFE_CALL(cutCreateTimer(&total_timer));

    // compute size of state
    uint32 s_results = NUM_CHAINS * sizeof(uint64);

    // allocate and initialize host memory: chain i starts at start + i
    uint64* h_results = (uint64*) calloc(1, s_results);
    if (!h_results) {
        fprintf(stderr, "Failed to allocate %u bytes of host memory\n", s_results);
        return 1;
    }
    for(i = 0; i < NUM_CHAINS; i++) {
        h_results[i] = start + i;
    }

    // allocate and initialize device memory
    uint64* d_results;
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_results, s_results));

    CUT_SAFE_CALL(cutStartTimer(total_timer));
    CUDA_SAFE_CALL(cudaMemcpy(d_results, h_results, s_results, cudaMemcpyHostToDevice));

    double total_run_time = 0.0;
    uint32 run;
    for(run = 0; run < num_runs; run++) {
        unsigned int run_timer = 0;
        CUT_SAFE_CALL(cutCreateTimer(&run_timer));

        // chains are advanced from the highest step index downwards
        uint32 index = OPERATIONS_PER_CHAIN - 1 - run * OPERATIONS_PER_RUN;

#ifdef TEST_INTERMEDIATES
        // print intermediates (for testing against calculate_chains_dump)
        for(i = 0; i < NUM_CHAINS; i++) {
            printf("results[%d] = 0x%16.16llx\n", i, h_results[i]);
        }
#endif

        printf("Run %3.3d/%3.3d, starting at index 0x%6.6x... ", run+1, num_runs, index);
        fflush(stdout);
        // give the display a chance to breathe between long kernel runs
        usleep(500*1000);

        CUT_SAFE_CALL(cutStartTimer(run_timer));

#ifdef TEST_INTERMEDIATES
        CUDA_SAFE_CALL(cudaMemcpy(d_results, h_results, s_results, cudaMemcpyHostToDevice));
#endif

        dim3 gridDims(NUM_BLOCKS, 1, 1);
        dim3 blockDims(NUM_THREADS, 1, 1);
        crunch<<<gridDims, blockDims>>>(d_results, index);
        // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // has identical semantics
        CUDA_SAFE_CALL(cudaDeviceSynchronize());

#ifdef TEST_INTERMEDIATES
        CUDA_SAFE_CALL(cudaMemcpy(h_results, d_results, s_results, cudaMemcpyDeviceToHost));
#endif

        CUT_SAFE_CALL(cutStopTimer(run_timer));
        float run_time = cutGetTimerValue(run_timer);
        printf("%f ms.\n", run_time);
        total_run_time += run_time;
        fflush(stdout);
        CUT_SAFE_CALL(cutDeleteTimer(run_timer));
    }

    CUDA_SAFE_CALL(cudaMemcpy(h_results, d_results, s_results, cudaMemcpyDeviceToHost));
    CUT_SAFE_CALL(cutStopTimer(total_timer));

    // free device memory (cudaFree takes the device pointer itself;
    // the original cast it to void**, which was misleading)
    CUDA_SAFE_CALL(cudaFree(d_results));

    // print results
    for(i = 0; i < NUM_CHAINS; i++) {
        printf("results[%d] = 0x%16.16llx\n", i, h_results[i]);
    }

    // free host memory
    free(h_results);

    // report total time
    printf("Total time: %f ms, %f spent crunching\n", cutGetTimerValue(total_timer), total_run_time);

    // delete the whole-run timer
    CUT_SAFE_CALL(cutDeleteTimer(total_timer));

    return 0;
}
|
99a5591fa14a6fdd892c48e6b089b42c78e656f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hip/hip_complex.h>
// Grid-stride loop over the x dimension: each thread starts at its global x
// index and advances by the total number of launched threads in x, so any
// grid geometry covers the full range [0, n).
#define CUDA_KERNEL_LOOP_x(i,n) \
for(int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Grid-stride loop over the y dimension, covering [0, m).
#define CUDA_KERNEL_LOOP_y(j,m) \
for(int j = blockIdx.y * blockDim.y + threadIdx.y; \
j < (m); \
j += blockDim.y * gridDim.y)
// Iterates z <- z^2 + c up to `threshold` times; returns the step index at
// which |z| first exceeds 2, or 0 if the orbit never escapes within the
// iteration budget.
__device__ int mandelbrot(hipDoubleComplex c, int threshold)
{
    hipDoubleComplex z = make_cuDoubleComplex(0, 0);
    int step = 0;
    while (step < threshold)
    {
        z = cuCadd(cuCmul(z, z), c);
        if (cuCabs(z) > 2)
            return step;
        ++step;
    }
    return 0;
}
// Fills `atlas` (row-major, yn rows of xn entries) with the escape-iteration
// count of each pixel of the complex rectangle [xmin,xmax] x [ymin,ymax],
// sampled on an xn x yn grid. Grid-stride loops let any launch geometry
// cover the whole image.
__global__ void mandelbrot_set(double xmin, double xmax, double ymin, double ymax,
int xn, int yn, int threshold, int *atlas)
{
CUDA_KERNEL_LOOP_y(j, yn)
{
CUDA_KERNEL_LOOP_x(i, xn)
{
// map pixel (i, j) to its point c in the complex plane
double cx = xmin + i * (xmax - xmin) / xn;
double cy = ymin + j * (ymax - ymin) / yn;
hipDoubleComplex c = make_cuDoubleComplex(cx, cy);
atlas[j * xn + i] = mandelbrot(c, threshold);
}
}
}
// Renders a 1920x1080 greyscale Mandelbrot zoom to MathPic.ppm (binary P6).
// Fixes over the original: allocation/fopen failures are checked, the
// max==0 case no longer divides by zero, and both the host and device
// buffers are released before exit.
int main()
{
    const size_t count = (size_t)1920 * 1080;

    // host + device buffers for the per-pixel escape-iteration counts
    int *host_atlas = (int*)malloc(count * sizeof(int));
    if (host_atlas == nullptr)
        return 1;
    int *device_atlas = nullptr;
    hipMalloc((void**) &device_atlas, count * sizeof(int));

    dim3 d(16, 16, 1);
    hipLaunchKernelGGL(( mandelbrot_set), dim3(d), dim3(d), 0, 0, -0.748768, -0.748718, 0.0650619375, 0.0650900625, 1920, 1080, 2048, device_atlas);
    hipMemcpy(host_atlas, device_atlas, count * sizeof(int), hipMemcpyDeviceToHost);

    FILE *fp = fopen("MathPic.ppm","wb");
    if (fp == nullptr)
    {
        hipFree(device_atlas);
        free(host_atlas);
        return 1;
    }
    fprintf(fp, "P6\n%d %d\n255\n", 1920, 1080);

    // normalize by the maximum iteration count; guard against an all-zero
    // atlas, which would otherwise divide by zero below
    int max = 0;
    for(size_t i = 0; i < count; i++)
    {
        if(host_atlas[i] > max)
            max = host_atlas[i];
    }
    if(max == 0)
        max = 1;

    // write greyscale pixels (same byte for R, G and B)
    for(size_t i = 0; i < count; i++)
    {
        char c = host_atlas[i] * 255 / max;
        fwrite(&c, 1, 1, fp);
        fwrite(&c, 1, 1, fp);
        fwrite(&c, 1, 1, fp);
    }
    fclose(fp);

    // release GPU and host buffers
    hipFree(device_atlas);
    free(host_atlas);
    return 0;
}
| 99a5591fa14a6fdd892c48e6b089b42c78e656f6.cu | #include <stdio.h>
#include <cuComplex.h>
// Grid-stride loop over the x dimension: each thread starts at its global x
// index and advances by the total number of launched threads in x, so any
// grid geometry covers the full range [0, n).
#define CUDA_KERNEL_LOOP_x(i,n) \
for(int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Grid-stride loop over the y dimension, covering [0, m).
#define CUDA_KERNEL_LOOP_y(j,m) \
for(int j = blockIdx.y * blockDim.y + threadIdx.y; \
j < (m); \
j += blockDim.y * gridDim.y)
// Iterates z <- z^2 + c up to `threshold` times; returns the step index at
// which |z| first exceeds 2, or 0 if the orbit never escapes within the
// iteration budget.
__device__ int mandelbrot(cuDoubleComplex c, int threshold)
{
    cuDoubleComplex z = make_cuDoubleComplex(0, 0);
    int step = 0;
    while (step < threshold)
    {
        z = cuCadd(cuCmul(z, z), c);
        if (cuCabs(z) > 2)
            return step;
        ++step;
    }
    return 0;
}
// Fills `atlas` (row-major, yn rows of xn entries) with the escape-iteration
// count of each pixel of the complex rectangle [xmin,xmax] x [ymin,ymax],
// sampled on an xn x yn grid. Grid-stride loops let any launch geometry
// cover the whole image.
__global__ void mandelbrot_set(double xmin, double xmax, double ymin, double ymax,
int xn, int yn, int threshold, int *atlas)
{
CUDA_KERNEL_LOOP_y(j, yn)
{
CUDA_KERNEL_LOOP_x(i, xn)
{
// map pixel (i, j) to its point c in the complex plane
double cx = xmin + i * (xmax - xmin) / xn;
double cy = ymin + j * (ymax - ymin) / yn;
cuDoubleComplex c = make_cuDoubleComplex(cx, cy);
atlas[j * xn + i] = mandelbrot(c, threshold);
}
}
}
// Renders a 1920x1080 greyscale Mandelbrot zoom to MathPic.ppm (binary P6).
// Fixes over the original: allocation/fopen failures are checked, the
// max==0 case no longer divides by zero, and both the host and device
// buffers are released before exit.
int main()
{
    const size_t count = (size_t)1920 * 1080;

    // host + device buffers for the per-pixel escape-iteration counts
    int *host_atlas = (int*)malloc(count * sizeof(int));
    if (host_atlas == nullptr)
        return 1;
    int *device_atlas = nullptr;
    cudaMalloc((void**) &device_atlas, count * sizeof(int));

    dim3 d(16, 16, 1);
    mandelbrot_set<<<d, d>>>(-0.748768, -0.748718, 0.0650619375, 0.0650900625, 1920, 1080, 2048, device_atlas);
    cudaMemcpy(host_atlas, device_atlas, count * sizeof(int), cudaMemcpyDeviceToHost);

    FILE *fp = fopen("MathPic.ppm","wb");
    if (fp == nullptr)
    {
        cudaFree(device_atlas);
        free(host_atlas);
        return 1;
    }
    fprintf(fp, "P6\n%d %d\n255\n", 1920, 1080);

    // normalize by the maximum iteration count; guard against an all-zero
    // atlas, which would otherwise divide by zero below
    int max = 0;
    for(size_t i = 0; i < count; i++)
    {
        if(host_atlas[i] > max)
            max = host_atlas[i];
    }
    if(max == 0)
        max = 1;

    // write greyscale pixels (same byte for R, G and B)
    for(size_t i = 0; i < count; i++)
    {
        char c = host_atlas[i] * 255 / max;
        fwrite(&c, 1, 1, fp);
        fwrite(&c, 1, 1, fp);
        fwrite(&c, 1, 1, fp);
    }
    fclose(fp);

    // release GPU and host buffers
    cudaFree(device_atlas);
    free(host_atlas);
    return 0;
}
|
793fcba4f47875e1af40b25aff6bbd91cf757e76.hip | // !!! This is a file automatically generated by hipify!!!
//======================================
//
//
// GPU
//======================================
#include"stdafx.h"
#include"Reshape_MirrorX_DATA.hpp"
#include"Reshape_MirrorX_FUNC.hpp"
#include"Reshape_MirrorX_Base.h"
#include"Reshape_MirrorX_GPU.cuh"
#include"Reshape_MirrorX_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
/** Constructor.
    The output data structure is derived by the layer data from the given
    input structure (see Reshape_MirrorX_LayerData_GPU::GetOutputDataStruct). */
Reshape_MirrorX_GPU::Reshape_MirrorX_GPU(Gravisbell::GUID guid, Reshape_MirrorX_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
: Reshape_MirrorX_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1))
, layerData (i_layerData) /**< layer data */
{
}
/** Destructor. */
Reshape_MirrorX_GPU::~Reshape_MirrorX_GPU()
{
}
//================================
// Basic processing
//================================
/** Get the layer kind; this implementation runs on the GPU. */
U32 Reshape_MirrorX_GPU::GetLayerKind()const
{
return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initialize the layer (delegates to the layer data).
    @return 0 (ERROR_CODE_NONE) on success. */
ErrorCode Reshape_MirrorX_GPU::Initialize(void)
{
return this->layerData.Initialize();
}
//===========================
// Layer data access
//===========================
/** Get the layer data (mutable). */
Reshape_MirrorX_LayerData_Base& Reshape_MirrorX_GPU::GetLayerData()
{
return this->layerData;
}
/** Get the layer data (const). */
const Reshape_MirrorX_LayerData_Base& Reshape_MirrorX_GPU::GetLayerData()const
{
return this->layerData;
}
//================================
// Calculation
//================================
/** Pre-processing for learning.
	Must run once after the network is built, before any training pass;
	it does not need to run per data sample. On failure, PreProcessLearnLoop
	and later stages must not be executed.
	Performs the calculation pre-processing and then allocates host staging
	buffers (contiguous array plus per-batch row pointers) for the
	output-error and input-error signals.
	@return 0 (ERROR_CODE_NONE) on success. */
ErrorCode Reshape_MirrorX_GPU::PreProcessLearn()
{
ErrorCode errorCode = this->PreProcessCalculate();
if(errorCode != ErrorCode::ERROR_CODE_NONE)
return errorCode;
// output-error staging buffer (host) and per-batch row pointers
this->m_lpDOutputBuffer_h.resize(this->GetBatchSize() * this->outputBufferCount);
this->m_lppDOutputBuffer.resize(this->GetBatchSize());
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
this->m_lppDOutputBuffer[batchNum] = &this->m_lpDOutputBuffer_h[batchNum * this->outputBufferCount];
// input-error staging buffer (host) and per-batch row pointers
this->m_lpDInputBuffer_h.resize(this->GetBatchSize() * this->inputBufferCount);
this->m_lppDInputBuffer.resize(this->GetBatchSize());
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
this->m_lppDInputBuffer[batchNum] = &this->m_lpDInputBuffer_h[batchNum * this->inputBufferCount];
return ErrorCode::ERROR_CODE_NONE;
}
/** Pre-processing for calculation.
	Must run once after the network is built, before Calculate; it does not
	need to run per data sample. On failure, Calculate and later stages must
	not be executed.
	Caches the input/output element counts and allocates host staging
	buffers (contiguous array plus per-batch row pointers) for both sides.
	@return 0 (ERROR_CODE_NONE) on success. */
ErrorCode Reshape_MirrorX_GPU::PreProcessCalculate()
{
// cache and validate the input element count
this->inputBufferCount = this->GetInputBufferCount();
if(this->inputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT;
// cache and validate the output element count
this->outputBufferCount = this->GetOutputBufferCount();
if(this->outputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT;
// host staging buffer for the input, plus per-batch row pointers
this->m_lpInputBuffer_h.resize(this->inputBufferCount * this->GetBatchSize());
this->m_lppInputBuffer.resize(this->GetBatchSize(), NULL);
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
this->m_lppInputBuffer[batchNum] = &this->m_lpInputBuffer_h[batchNum * this->inputBufferCount];
}
// host staging buffer for the output, plus per-batch row pointers
this->m_lpOutputBuffer_h.resize(this->GetBatchSize() * this->outputBufferCount);
this->m_lppOutputBuffer.resize(this->GetBatchSize());
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
this->m_lppOutputBuffer[batchNum] = &this->m_lpOutputBuffer_h[batchNum * this->outputBufferCount];
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Per-dataset loop initialization, run before each dataset pass.
	Nothing to do for this layer.
	@return 0 (ERROR_CODE_NONE). */
ErrorCode Reshape_MirrorX_GPU::PreProcessLoop()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** Forward calculation (device buffers in, device buffers out).
	Stages the input on the host, mirrors it along X on the CPU, then
	copies the mirrored result back to the device.
	@param i_lppInputBuffer  device input buffer, GetInputBufferCount() elements per batch entry
	@param o_lppOutputBuffer device output buffer, GetOutputBufferCount() elements per batch entry
	@return 0 (ERROR_CODE_NONE) on success. */
ErrorCode Reshape_MirrorX_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
// stage the device input on the host
hipMemcpy(&this->m_lpInputBuffer_h[0], i_lppInputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), hipMemcpyDeviceToHost);
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
for(U32 ch=0; ch<this->GetInputDataStruct().ch; ch++)
{
for(U32 inputZ=0; inputZ<this->GetInputDataStruct().z; inputZ++)
{
for(U32 inputY=0; inputY<this->GetInputDataStruct().y; inputY++)
{
for(U32 inputX=0; inputX<this->GetInputDataStruct().x; inputX++)
{
U32 inputOffset = this->GetInputDataStruct().POSITION_TO_OFFSET(inputX, inputY, inputZ, ch);
// each input column inputX is written to the two output columns
// mirrored around x-1: (x-1-inputX) and (x-1+inputX); the two
// coincide on the mirror axis (inputX == 0)
U32 outputOffset0 = this->GetOutputDataStruct().POSITION_TO_OFFSET(this->GetInputDataStruct().x-1-inputX, inputY, inputZ, ch);
U32 outputOffset1 = this->GetOutputDataStruct().POSITION_TO_OFFSET(this->GetInputDataStruct().x-1+inputX, inputY, inputZ, ch);
this->m_lppOutputBuffer[batchNum][outputOffset0] = this->m_lppInputBuffer[batchNum][inputOffset];
this->m_lppOutputBuffer[batchNum][outputOffset1] = this->m_lppInputBuffer[batchNum][inputOffset];
}
}
}
}
}
// copy the mirrored result back to the device
hipMemcpy(o_lppOutputBuffer, &this->m_lpOutputBuffer_h[0], sizeof(F32)*this->outputBufferCount*this->GetBatchSize(), hipMemcpyHostToDevice);
return ErrorCode::ERROR_CODE_NONE;
}
//================================
// Learning / back-propagation
//================================
/** Compute the input error (dInput) from the output error (dOutput).
	Each output column at distance |outputX - (x-1)| from the mirror axis
	maps back to that input column, so every input element accumulates the
	gradients of the output elements it was copied to in Calculate_device.
	Runs on the host via staging buffers, like the forward pass.
	@param o_lppDInputBuffer  device buffer receiving the input error,
	                          [GetBatchSize()][GetInputBufferCount()]; may be NULL (then nothing is computed)
	@param i_lppDOutputBuffer device buffer holding the output error,
	                          [GetBatchSize()][GetOutputBufferCount()]; may be NULL (then nothing is computed)
*/
ErrorCode Reshape_MirrorX_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
// only compute when both error buffers are provided
if(i_lppDOutputBuffer && o_lppDInputBuffer)
{
// stage the device output error on the host
hipMemcpy(&this->m_lpDOutputBuffer_h[0], i_lppDOutputBuffer, sizeof(F32)*this->outputBufferCount*this->GetBatchSize(), hipMemcpyDeviceToHost);
// clear the accumulator before summing gradients
memset(&this->m_lpDInputBuffer_h[0], 0, sizeof(F32)*this->GetBatchSize()*this->inputBufferCount);
// accumulate each output gradient into its source input element
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
for(U32 ch=0; ch<this->GetOutputDataStruct().ch; ch++)
{
for(U32 outputZ=0; outputZ<this->GetOutputDataStruct().z; outputZ++)
{
for(U32 outputY=0; outputY<this->GetOutputDataStruct().y; outputY++)
{
for(U32 outputX=0; outputX<this->GetOutputDataStruct().x; outputX++)
{
U32 inputX = abs((S32)(outputX - (this->GetInputDataStruct().x-1)));
U32 outputOffset = this->GetOutputDataStruct().POSITION_TO_OFFSET(outputX, outputY, outputZ, ch);
U32 inputOffset = this->GetInputDataStruct().POSITION_TO_OFFSET(inputX, outputY, outputZ, ch);
this->m_lppDInputBuffer[batchNum][inputOffset] += this->m_lppDOutputBuffer[batchNum][outputOffset];
}
}
}
}
}
// copy the accumulated input error back to the device
hipMemcpy(o_lppDInputBuffer, &this->m_lpDInputBuffer_h[0], sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), hipMemcpyHostToDevice);
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Learning step.
	This layer only propagates the gradient (training appears to update no
	parameters here — it simply forwards to CalculateDInput_device).
	@param i_lppDOutputBuffer device buffer holding the output error,
	                          [GetBatchSize()][GetOutputBufferCount()]
*/
ErrorCode Reshape_MirrorX_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
| 793fcba4f47875e1af40b25aff6bbd91cf757e76.cu | //======================================
// フィードフォワードニューラルネットワークの統合処理レイヤー
// 結合、活性化
// GPU処理用
//======================================
#include"stdafx.h"
#include"Reshape_MirrorX_DATA.hpp"
#include"Reshape_MirrorX_FUNC.hpp"
#include"Reshape_MirrorX_Base.h"
#include"Reshape_MirrorX_GPU.cuh"
#include"Reshape_MirrorX_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
/** Constructor.
    The output data structure is derived by the layer data from the given
    input structure (see Reshape_MirrorX_LayerData_GPU::GetOutputDataStruct). */
Reshape_MirrorX_GPU::Reshape_MirrorX_GPU(Gravisbell::GUID guid, Reshape_MirrorX_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
: Reshape_MirrorX_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1))
, layerData (i_layerData) /**< layer data */
{
}
/** Destructor. */
Reshape_MirrorX_GPU::~Reshape_MirrorX_GPU()
{
}
//================================
// Basic processing
//================================
/** Get the layer kind; this implementation runs on the GPU. */
U32 Reshape_MirrorX_GPU::GetLayerKind()const
{
return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initialize the layer (delegates to the layer data).
    @return 0 (ERROR_CODE_NONE) on success. */
ErrorCode Reshape_MirrorX_GPU::Initialize(void)
{
return this->layerData.Initialize();
}
//===========================
// Layer data access
//===========================
/** Get the layer data (mutable). */
Reshape_MirrorX_LayerData_Base& Reshape_MirrorX_GPU::GetLayerData()
{
return this->layerData;
}
/** Get the layer data (const). */
const Reshape_MirrorX_LayerData_Base& Reshape_MirrorX_GPU::GetLayerData()const
{
return this->layerData;
}
//================================
// Calculation
//================================
/** Pre-processing for learning.
	Must run once after the network is built, before any training pass;
	it does not need to run per data sample. On failure, the learning loop
	and later stages must not be executed.
	Performs the calculation pre-processing and then allocates host staging
	buffers (contiguous array plus per-batch row pointers) for the
	output-error and input-error signals.
	@return 0 (ERROR_CODE_NONE) on success. */
ErrorCode Reshape_MirrorX_GPU::PreProcessLearn()
{
ErrorCode errorCode = this->PreProcessCalculate();
if(errorCode != ErrorCode::ERROR_CODE_NONE)
return errorCode;
// output-error staging buffer (host) and per-batch row pointers
this->m_lpDOutputBuffer_h.resize(this->GetBatchSize() * this->outputBufferCount);
this->m_lppDOutputBuffer.resize(this->GetBatchSize());
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
this->m_lppDOutputBuffer[batchNum] = &this->m_lpDOutputBuffer_h[batchNum * this->outputBufferCount];
// input-error staging buffer (host) and per-batch row pointers
this->m_lpDInputBuffer_h.resize(this->GetBatchSize() * this->inputBufferCount);
this->m_lppDInputBuffer.resize(this->GetBatchSize());
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
this->m_lppDInputBuffer[batchNum] = &this->m_lpDInputBuffer_h[batchNum * this->inputBufferCount];
return ErrorCode::ERROR_CODE_NONE;
}
/** 演算前処理を実行する.(演算用)
@param batchSize 同時に演算を行うバッチのサイズ.
NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない.
失敗した場合はCalculate以降の処理は実行不可. */
ErrorCode Reshape_MirrorX_GPU::PreProcessCalculate()
{
// 入力バッファ数を確認
this->inputBufferCount = this->GetInputBufferCount();
if(this->inputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT;
// 出力バッファ数を確認
this->outputBufferCount = this->GetOutputBufferCount();
if(this->outputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT;
// 入力バッファ保存用のアドレス配列を作成
this->m_lpInputBuffer_h.resize(this->inputBufferCount * this->GetBatchSize());
this->m_lppInputBuffer.resize(this->GetBatchSize(), NULL);
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
this->m_lppInputBuffer[batchNum] = &this->m_lpInputBuffer_h[batchNum * this->inputBufferCount];
}
// 出力バッファを作成
this->m_lpOutputBuffer_h.resize(this->GetBatchSize() * this->outputBufferCount);
this->m_lppOutputBuffer.resize(this->GetBatchSize());
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
this->m_lppOutputBuffer[batchNum] = &this->m_lpOutputBuffer_h[batchNum * this->outputBufferCount];
}
return ErrorCode::ERROR_CODE_NONE;
}
/** ループの初期化処理.データセットの実行開始前に実行する
失敗した場合はCalculate以降の処理は実行不可. */
ErrorCode Reshape_MirrorX_GPU::PreProcessLoop()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** 演算処理を実行する.
@param lpInputBuffer 入力データバッファ. GetInputBufferCountで取得した値の要素数が必要
@return 成功した場合0が返る */
ErrorCode Reshape_MirrorX_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
cudaMemcpy(&this->m_lpInputBuffer_h[0], i_lppInputBuffer, sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), cudaMemcpyDeviceToHost);
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
for(U32 ch=0; ch<this->GetInputDataStruct().ch; ch++)
{
for(U32 inputZ=0; inputZ<this->GetInputDataStruct().z; inputZ++)
{
for(U32 inputY=0; inputY<this->GetInputDataStruct().y; inputY++)
{
for(U32 inputX=0; inputX<this->GetInputDataStruct().x; inputX++)
{
U32 inputOffset = this->GetInputDataStruct().POSITION_TO_OFFSET(inputX, inputY, inputZ, ch);
U32 outputOffset0 = this->GetOutputDataStruct().POSITION_TO_OFFSET(this->GetInputDataStruct().x-1-inputX, inputY, inputZ, ch);
U32 outputOffset1 = this->GetOutputDataStruct().POSITION_TO_OFFSET(this->GetInputDataStruct().x-1+inputX, inputY, inputZ, ch);
this->m_lppOutputBuffer[batchNum][outputOffset0] = this->m_lppInputBuffer[batchNum][inputOffset];
this->m_lppOutputBuffer[batchNum][outputOffset1] = this->m_lppInputBuffer[batchNum][inputOffset];
}
}
}
}
}
// 出力バッファをデバイスにコピー
cudaMemcpy(o_lppOutputBuffer, &this->m_lpOutputBuffer_h[0], sizeof(F32)*this->outputBufferCount*this->GetBatchSize(), cudaMemcpyHostToDevice);
return ErrorCode::ERROR_CODE_NONE;
}
//================================
// 学習処理
//================================
/** 入力誤差計算をを実行する.学習せずに入力誤差を取得したい場合に使用する.
入力信号、出力信号は直前のCalculateの値を参照する.
@param o_lppDInputBuffer 入力誤差差分格納先レイヤー. [GetBatchSize()の戻り値][GetInputBufferCount()の戻り値]の要素数が必要.
@param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要な配列の[GetOutputDataCount()]配列
直前の計算結果を使用する */
ErrorCode Reshape_MirrorX_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
// 入力誤差計算
if(i_lppDOutputBuffer && o_lppDInputBuffer)
{
// 出力誤差バッファをホストにコピー
cudaMemcpy(&this->m_lpDOutputBuffer_h[0], i_lppDOutputBuffer, sizeof(F32)*this->outputBufferCount*this->GetBatchSize(), cudaMemcpyDeviceToHost);
// 入力誤差を初期化
memset(&this->m_lpDInputBuffer_h[0], 0, sizeof(F32)*this->GetBatchSize()*this->inputBufferCount);
// 入力誤差計算
for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
{
for(U32 ch=0; ch<this->GetOutputDataStruct().ch; ch++)
{
for(U32 outputZ=0; outputZ<this->GetOutputDataStruct().z; outputZ++)
{
for(U32 outputY=0; outputY<this->GetOutputDataStruct().y; outputY++)
{
for(U32 outputX=0; outputX<this->GetOutputDataStruct().x; outputX++)
{
U32 inputX = abs((S32)(outputX - (this->GetInputDataStruct().x-1)));
U32 outputOffset = this->GetOutputDataStruct().POSITION_TO_OFFSET(outputX, outputY, outputZ, ch);
U32 inputOffset = this->GetInputDataStruct().POSITION_TO_OFFSET(inputX, outputY, outputZ, ch);
this->m_lppDInputBuffer[batchNum][inputOffset] += this->m_lppDOutputBuffer[batchNum][outputOffset];
}
}
}
}
}
// 入力誤差をデバイスにコピー
cudaMemcpy(o_lppDInputBuffer, &this->m_lpDInputBuffer_h[0], sizeof(F32)*this->inputBufferCount*this->GetBatchSize(), cudaMemcpyHostToDevice);
}
return ErrorCode::ERROR_CODE_NONE;
}
/** 学習処理を実行する.
入力信号、出力信号は直前のCalculateの値を参照する.
@param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要.
直前の計算結果を使用する */
ErrorCode Reshape_MirrorX_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
|
bf9cefef141c511ecedd6bd3fd53337cfc1e221e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Kent Gauen
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/BurstNnfSimpleBlockSelect.cuh>
/****
Select "topK" from "blockTileSize" of inVals
****/
#define ABS(N) (((N)<0)?(-(N)):((N)))
namespace faiss {
namespace gpu {
__global__ void burstNnfBlockSelect(
Tensor<float, 3, true> inVals,
Tensor<int, 3, true> inKeys,
Tensor<float, 3, true> outVals,
Tensor<int, 5, true> outKeys,
float valMean) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
int col = threadIdx.y + blockDim.y * blockIdx.y;
int nframes = inKeys.getSize(0);
int numOfComps = inKeys.getSize(1);
bool legal_row = row < inVals.getSize(0);
bool legal_col = col < inVals.getSize(1);
int k = outVals.getSize(2);
int kidx = 0;
if ( legal_row && legal_col ) {
float outVal_max = ABS(outVals[row][col][k-1] - valMean);
float outVal_curr = outVal_max;
for (int comp = 0; comp < numOfComps; ++comp){
float inVal_raw = inVals[row][col][comp];
float inVal = ABS(inVal_raw - valMean);
if (inVal < outVal_max){
kidx = k-1;
outVal_curr = outVal_max;
while( inVal < outVal_curr && kidx > 0){
kidx -= 1;
outVal_curr = outVals[row][col][kidx];
outVal_curr = ABS(outVal_curr - valMean);
}
if (kidx != 0){ kidx += 1; }
else if (inVal > outVal_curr){ kidx += 1; }
// printf("an assign!: %d,%f\n",kidx,inVal);
// shift values up
for (int sidx = k-1; sidx > kidx; --sidx){
outVals[row][col][sidx] = (float)outVals[row][col][sidx-1];
for (int fidx = 0; fidx < nframes; ++fidx){
outKeys[fidx][row][col][sidx][0] = (int)
outKeys[fidx][row][col][sidx-1][0];
outKeys[fidx][row][col][sidx][1] = (int)
outKeys[fidx][row][col][sidx-1][1];
}
}
// assign new values
outVals[row][col][kidx] = inVal_raw;
for (int fidx = 0; fidx < nframes; ++fidx){
outKeys[fidx][row][col][kidx][0] = inKeys[fidx][comp][0];
outKeys[fidx][row][col][kidx][1] = inKeys[fidx][comp][1];
}
outVal_max = ABS(outVals[row][col][k-1]-valMean);
}
}
}
}
void runBurstNnfSimpleBlockSelect(
Tensor<float, 3, true>& inVals,
Tensor<int, 3, true>& inKeys,
Tensor<float, 3, true>& outVals,
Tensor<int, 5, true>& outKeys,
float valMean, bool comp_with_out,int k,
hipStream_t stream){
// assert shapes
FAISS_ASSERT(outVals.getSize(0) == outKeys.getSize(1)); // height
FAISS_ASSERT(outVals.getSize(1) == outKeys.getSize(2)); // width
FAISS_ASSERT(inVals.getSize(0) == outVals.getSize(0)); // nframes
FAISS_ASSERT(inVals.getSize(1) == outVals.getSize(1)); // width
FAISS_ASSERT(inVals.getSize(2) == inKeys.getSize(1)); // batched search space
FAISS_ASSERT(outVals.getSize(2) == k);
FAISS_ASSERT(outKeys.getSize(3) == k);
// setup kernel launch
// keep it simple; each (h,w) index gets a thread, _not_ a block
// it is not as parallel as it could be.
// this will probably have horrible warp divergence too
int maxThreads = (int) getMaxThreadsCurrentDevice();
// std::cout << "maxThreads: " << maxThreads << std::endl;
int sqrtThreads = 32;//utils::pow(maxThreads*1.0, .5);
auto nBlocksH = utils::divUp(inVals.getSize(0),sqrtThreads);
auto nBlocksW = utils::divUp(inVals.getSize(1),sqrtThreads);
// printf("(nBlocksH,nBlocksW,sqrtThreads): (%d,%d,%d)\n",nBlocksH,nBlocksW,sqrtThreads);
auto grid = dim3(nBlocksH,nBlocksW);
auto block = dim3(sqrtThreads,sqrtThreads);
// launch kernel
hipLaunchKernelGGL(( burstNnfBlockSelect), dim3(grid), dim3(block), 0, stream, inVals, inKeys,
outVals, outKeys,
valMean);
CUDA_TEST_ERROR();
}
}
} | bf9cefef141c511ecedd6bd3fd53337cfc1e221e.cu | /**
* Copyright (c) Kent Gauen
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/BurstNnfSimpleBlockSelect.cuh>
/****
Select "topK" from "blockTileSize" of inVals
****/
#define ABS(N) (((N)<0)?(-(N)):((N)))
namespace faiss {
namespace gpu {
__global__ void burstNnfBlockSelect(
Tensor<float, 3, true> inVals,
Tensor<int, 3, true> inKeys,
Tensor<float, 3, true> outVals,
Tensor<int, 5, true> outKeys,
float valMean) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
int col = threadIdx.y + blockDim.y * blockIdx.y;
int nframes = inKeys.getSize(0);
int numOfComps = inKeys.getSize(1);
bool legal_row = row < inVals.getSize(0);
bool legal_col = col < inVals.getSize(1);
int k = outVals.getSize(2);
int kidx = 0;
if ( legal_row && legal_col ) {
float outVal_max = ABS(outVals[row][col][k-1] - valMean);
float outVal_curr = outVal_max;
for (int comp = 0; comp < numOfComps; ++comp){
float inVal_raw = inVals[row][col][comp];
float inVal = ABS(inVal_raw - valMean);
if (inVal < outVal_max){
kidx = k-1;
outVal_curr = outVal_max;
while( inVal < outVal_curr && kidx > 0){
kidx -= 1;
outVal_curr = outVals[row][col][kidx];
outVal_curr = ABS(outVal_curr - valMean);
}
if (kidx != 0){ kidx += 1; }
else if (inVal > outVal_curr){ kidx += 1; }
// printf("an assign!: %d,%f\n",kidx,inVal);
// shift values up
for (int sidx = k-1; sidx > kidx; --sidx){
outVals[row][col][sidx] = (float)outVals[row][col][sidx-1];
for (int fidx = 0; fidx < nframes; ++fidx){
outKeys[fidx][row][col][sidx][0] = (int)
outKeys[fidx][row][col][sidx-1][0];
outKeys[fidx][row][col][sidx][1] = (int)
outKeys[fidx][row][col][sidx-1][1];
}
}
// assign new values
outVals[row][col][kidx] = inVal_raw;
for (int fidx = 0; fidx < nframes; ++fidx){
outKeys[fidx][row][col][kidx][0] = inKeys[fidx][comp][0];
outKeys[fidx][row][col][kidx][1] = inKeys[fidx][comp][1];
}
outVal_max = ABS(outVals[row][col][k-1]-valMean);
}
}
}
}
void runBurstNnfSimpleBlockSelect(
Tensor<float, 3, true>& inVals,
Tensor<int, 3, true>& inKeys,
Tensor<float, 3, true>& outVals,
Tensor<int, 5, true>& outKeys,
float valMean, bool comp_with_out,int k,
cudaStream_t stream){
// assert shapes
FAISS_ASSERT(outVals.getSize(0) == outKeys.getSize(1)); // height
FAISS_ASSERT(outVals.getSize(1) == outKeys.getSize(2)); // width
FAISS_ASSERT(inVals.getSize(0) == outVals.getSize(0)); // nframes
FAISS_ASSERT(inVals.getSize(1) == outVals.getSize(1)); // width
FAISS_ASSERT(inVals.getSize(2) == inKeys.getSize(1)); // batched search space
FAISS_ASSERT(outVals.getSize(2) == k);
FAISS_ASSERT(outKeys.getSize(3) == k);
// setup kernel launch
// keep it simple; each (h,w) index gets a thread, _not_ a block
// it is not as parallel as it could be.
// this will probably have horrible warp divergence too
int maxThreads = (int) getMaxThreadsCurrentDevice();
// std::cout << "maxThreads: " << maxThreads << std::endl;
int sqrtThreads = 32;//utils::pow(maxThreads*1.0, .5);
auto nBlocksH = utils::divUp(inVals.getSize(0),sqrtThreads);
auto nBlocksW = utils::divUp(inVals.getSize(1),sqrtThreads);
// printf("(nBlocksH,nBlocksW,sqrtThreads): (%d,%d,%d)\n",nBlocksH,nBlocksW,sqrtThreads);
auto grid = dim3(nBlocksH,nBlocksW);
auto block = dim3(sqrtThreads,sqrtThreads);
// launch kernel
burstNnfBlockSelect<<<grid, block, 0, stream>>>(inVals, inKeys,
outVals, outKeys,
valMean);
CUDA_TEST_ERROR();
}
}
} |
f1f889703412df385594e09c17becf67406a265f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuConvert32uC1To32fC1Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
size_t src_stride = 2;
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
size_t dst_stride = 2;
float mul_constant = 1;
float add_constant = 1;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cuConvert32uC1To32fC1Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,mul_constant,add_constant,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cuConvert32uC1To32fC1Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,mul_constant,add_constant,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cuConvert32uC1To32fC1Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,mul_constant,add_constant,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f1f889703412df385594e09c17becf67406a265f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuConvert32uC1To32fC1Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
size_t src_stride = 2;
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
size_t dst_stride = 2;
float mul_constant = 1;
float add_constant = 1;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuConvert32uC1To32fC1Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,mul_constant,add_constant,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuConvert32uC1To32fC1Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,mul_constant,add_constant,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuConvert32uC1To32fC1Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,mul_constant,add_constant,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1b26363100222179a49878ec837b34c2928abdf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "merge_func.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void MergeRank(float * d_input, float * d_output)
{
int indexA = blockIdx.x * blockDim.x + threadIdx.x;
int indexB = indexA + 2048;
float temp1 = d_input[indexA];
float temp2 = d_input[indexB];
int indexAB = 2048;
while (d_input[indexAB] < temp1) {
indexAB++;
}
int indexBA = 0;
while (d_input[indexBA] < temp2) {
indexBA++;
}
__syncthreads();
d_output[indexA + indexAB + 1] = temp1;
d_output[indexB + indexBA + 1] = temp2;
}
void orderBitonicArray(float* d_in, int size, int part_size, float* d_out, bool log)
{
/**
* \brief Order output array of the bitonic sort function
* \param d_in - a partially sorted array, global memory, gpu
* \param size - the size of the input array
* \param part_size - the size of a sorted subarray
* \param d_out - a pointer to the output array, global memory, gpu, where
* function execution result will be stored
* \param log - show information about performance during each step
* \return
* void
*/
int iter_number = static_cast<int>(log2(size / part_size));
int init_num_threads = size / (2 * part_size);
int init_num_blocks = ((init_num_threads - 1) / 1024) + 1;
if (log)
{
std::cout << "--------------------------start log--------------------------------" << std::endl;
std::cout << "Number of steps\t" << iter_number << std::endl;
}
float* t_d_in = d_in;
for (int i = 0; i < iter_number; i++)
{
if (log)
{
std::cout << "-------------------------------------------------------------------" << std::endl;
std::cout << "Merging step #" << i << std::endl;
std::cout << "Number of blocks\t" << init_num_threads << std::endl;
std::cout << "Number of threads\t" << init_num_threads << std::endl;
}
mergingKernel << <init_num_blocks, init_num_threads >> >(t_d_in, part_size, d_out);
part_size *= 2;
init_num_threads = init_num_threads / 2;
init_num_blocks = ((init_num_threads - 1) / 1024) + 1;
hipFree(t_d_in);
hipMalloc((void **)&t_d_in, size * sizeof(int));
hipMemcpy(t_d_in, d_out, size * sizeof(int), hipMemcpyDeviceToDevice);
if (log)
{
float *out = new float[size];
hipMemcpy(out, d_out, size * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < size; i++) {
std::cout << out[i] << "\t";
}
std::cout << std::endl;
std::cout << std::endl;
delete[] out;
if (i == iter_number - 1)
{
std::cout << "----------------------------end log--------------------------------" << std::endl;
}
}
}
}
__global__ void mergingKernel(float* in_array, int part_size, float* out_array)
{
/**
* \brief kernel function for merging of the arrays of the partially sorted array
* \param in_array - the input array
* \param part_size - the size of a sorted subarray
* \param out_array - a pointer to the output array,
* where result of merging will be stored
* \return
* void
*/
int index = blockDim.x * blockIdx.x + threadIdx.x;
float* arr_left = in_array + 2 * part_size * index;
float* arr_right = arr_left + part_size;
int out_shift = 2 * part_size * index;
mergeArraysAsc(arr_left, arr_right, part_size, part_size, out_array, out_shift);
__syncthreads();
}
__device__ void mergeArraysAsc(float* arr_left, float* arr_right, int length_left, int length_right, float* out, int out_shift)
{
/**
* \brief Helper function for the mergingKernel function, merges subarrays
* \param arr_left - the first sorted array
* \param arr_right - the second sorted array
* \param length_left - size of the first array
* \param length_right - size of the second array
* \param out - a pointer to the output array, where result will be stored
* \param out_shift - shift, from which to start writing in output array.
* \return
* void
*/
int totalLength = length_left + length_right;
//running indices
int i = 0;
int j = 0;
int index = out_shift;
while (i < length_left && j < length_right)
{
if (arr_left[i] <= arr_right[j])
{
out[index] = arr_left[i];
i++;
index++;
}
else {
out[index] = arr_right[j];
j++;
index++;
}
}
//only one of these two loops will run
while (i < length_left)
{
out[index] = arr_left[i];
index++;
i++;
}
while (j < length_right)
{
out[index] = arr_right[j];
index++;
j++;
}
} | 1b26363100222179a49878ec837b34c2928abdf6.cu | #include "merge_func.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void MergeRank(float * d_input, float * d_output)
{
int indexA = blockIdx.x * blockDim.x + threadIdx.x;
int indexB = indexA + 2048;
float temp1 = d_input[indexA];
float temp2 = d_input[indexB];
int indexAB = 2048;
while (d_input[indexAB] < temp1) {
indexAB++;
}
int indexBA = 0;
while (d_input[indexBA] < temp2) {
indexBA++;
}
__syncthreads();
d_output[indexA + indexAB + 1] = temp1;
d_output[indexB + indexBA + 1] = temp2;
}
void orderBitonicArray(float* d_in, int size, int part_size, float* d_out, bool log)
{
/**
* \brief Order output array of the bitonic sort function
* \param d_in - a partially sorted array, global memory, gpu
* \param size - the size of the input array
* \param part_size - the size of a sorted subarray
* \param d_out - a pointer to the output array, global memory, gpu, where
* function execution result will be stored
* \param log - show information about performance during each step
* \return
* void
*/
int iter_number = static_cast<int>(log2(size / part_size));
int init_num_threads = size / (2 * part_size);
int init_num_blocks = ((init_num_threads - 1) / 1024) + 1;
if (log)
{
std::cout << "--------------------------start log--------------------------------" << std::endl;
std::cout << "Number of steps\t" << iter_number << std::endl;
}
float* t_d_in = d_in;
for (int i = 0; i < iter_number; i++)
{
if (log)
{
std::cout << "-------------------------------------------------------------------" << std::endl;
std::cout << "Merging step #" << i << std::endl;
std::cout << "Number of blocks\t" << init_num_threads << std::endl;
std::cout << "Number of threads\t" << init_num_threads << std::endl;
}
mergingKernel << <init_num_blocks, init_num_threads >> >(t_d_in, part_size, d_out);
part_size *= 2;
init_num_threads = init_num_threads / 2;
init_num_blocks = ((init_num_threads - 1) / 1024) + 1;
cudaFree(t_d_in);
cudaMalloc((void **)&t_d_in, size * sizeof(int));
cudaMemcpy(t_d_in, d_out, size * sizeof(int), cudaMemcpyDeviceToDevice);
if (log)
{
float *out = new float[size];
cudaMemcpy(out, d_out, size * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < size; i++) {
std::cout << out[i] << "\t";
}
std::cout << std::endl;
std::cout << std::endl;
delete[] out;
if (i == iter_number - 1)
{
std::cout << "----------------------------end log--------------------------------" << std::endl;
}
}
}
}
__global__ void mergingKernel(float* in_array, int part_size, float* out_array)
{
/**
* \brief kernel function for merging of the arrays of the partially sorted array
* \param in_array - the input array
* \param part_size - the size of a sorted subarray
* \param out_array - a pointer to the output array,
* where result of merging will be stored
* \return
* void
*/
int index = blockDim.x * blockIdx.x + threadIdx.x;
float* arr_left = in_array + 2 * part_size * index;
float* arr_right = arr_left + part_size;
int out_shift = 2 * part_size * index;
mergeArraysAsc(arr_left, arr_right, part_size, part_size, out_array, out_shift);
__syncthreads();
}
__device__ void mergeArraysAsc(float* arr_left, float* arr_right, int length_left, int length_right, float* out, int out_shift)
{
/**
* \brief Helper function for the mergingKernel function, merges subarrays
* \param arr_left - the first sorted array
* \param arr_right - the second sorted array
* \param length_left - size of the first array
* \param length_right - size of the second array
* \param out - a pointer to the output array, where result will be stored
* \param out_shift - shift, from which to start writing in output array.
* \return
* void
*/
int totalLength = length_left + length_right;
//running indices
int i = 0;
int j = 0;
int index = out_shift;
while (i < length_left && j < length_right)
{
if (arr_left[i] <= arr_right[j])
{
out[index] = arr_left[i];
i++;
index++;
}
else {
out[index] = arr_right[j];
j++;
index++;
}
}
//only one of these two loops will run
while (i < length_left)
{
out[index] = arr_left[i];
index++;
i++;
}
while (j < length_right)
{
out[index] = arr_right[j];
index++;
j++;
}
} |
b4d6da23c2ff2387aefc533eacdc9b5ef66676c2.hip | // !!! This is a file automatically generated by hipify!!!
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "common.h"
#include "ppm.h"
#define BLOCK_SIZE 32
__global__ void ConvolveHGPUGMem(unsigned int *dst, const unsigned int *src, const float *kernel, int kernelSize, int w, int h)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
float finalRed = 0.0f;
float finalGreen = 0.0f;
float finalBlue = 0.0f;
for (int i = 0; i < kernelSize; i++)
{
int px = col + (i - kernelSize/2);
// Clamp to [0, w-1]
px = MIN(px, w-1);
px = MAX(px, 0);
unsigned int pixel = src[row * w + px];
unsigned char r = pixel & 0x000000ff;
unsigned char g = (pixel & 0x0000ff00) >> 8;
unsigned char b = (pixel & 0x00ff0000) >> 16;
finalRed += r * kernel[i];
finalGreen += g * kernel[i];
finalBlue += b * kernel[i];
}
unsigned char finalRed_uc = roundf(finalRed);
unsigned char finalGreen_uc = roundf(finalGreen);
unsigned char finalBlue_uc = roundf(finalBlue);
unsigned int finalPixel = finalRed_uc
| (finalGreen_uc << 8)
| (finalBlue_uc << 16);
dst[row * w + col] = finalPixel;
}
__global__ void ConvolveVGPUGMem(unsigned int *dst, const unsigned int *src, const float *kernel, int kernelSize, int w, int h)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
float finalRed = 0.0f;
float finalGreen = 0.0f;
float finalBlue = 0.0f;
for (int i = 0; i < kernelSize; i++)
{
int py = row + (i - kernelSize/2);
// Clamp to [0, h-1]
py = MIN(py, h-1);
py = MAX(py, 0);
unsigned int pixel = src[py * w + col];
unsigned char r = pixel & 0x000000ff;
unsigned char g = (pixel & 0x0000ff00) >> 8;
unsigned char b = (pixel & 0x00ff0000) >> 16;
finalRed += r * kernel[i];
finalGreen += g * kernel[i];
finalBlue += b * kernel[i];
}
unsigned char finalRed_uc = roundf(finalRed);
unsigned char finalGreen_uc = roundf(finalGreen);
unsigned char finalBlue_uc = roundf(finalBlue);
unsigned int finalPixel = finalRed_uc
| (finalGreen_uc << 8)
| (finalBlue_uc << 16);
dst[row * w + col] = finalPixel;
}
void ApplyFilterGPUGMem(PPMImage &destImg, PPMImage &srcImg, const float * kernel, unsigned int kernelSize)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(divUp(srcImg.width,BLOCK_SIZE),divUp(srcImg.height,BLOCK_SIZE));
hipLaunchKernelGGL(( ConvolveHGPUGMem), dim3(dimGrid), dim3(dimBlock), 0, 0, destImg.data, srcImg.data, kernel, kernelSize, srcImg.width, srcImg.height);
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
unsigned int * bk;
bk = srcImg.data;
srcImg.data = destImg.data;
destImg.data =bk;
hipLaunchKernelGGL(( ConvolveVGPUGMem), dim3(dimGrid), dim3(dimBlock), 0, 0, destImg.data, srcImg.data, kernel, kernelSize, srcImg.width, srcImg.height);
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
printf("done\n");
}
| b4d6da23c2ff2387aefc533eacdc9b5ef66676c2.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include "common.h"
#include "ppm.h"
#define BLOCK_SIZE 32
__global__ void ConvolveHGPUGMem(unsigned int *dst, const unsigned int *src, const float *kernel, int kernelSize, int w, int h)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
float finalRed = 0.0f;
float finalGreen = 0.0f;
float finalBlue = 0.0f;
for (int i = 0; i < kernelSize; i++)
{
int px = col + (i - kernelSize/2);
// Clamp to [0, w-1]
px = MIN(px, w-1);
px = MAX(px, 0);
unsigned int pixel = src[row * w + px];
unsigned char r = pixel & 0x000000ff;
unsigned char g = (pixel & 0x0000ff00) >> 8;
unsigned char b = (pixel & 0x00ff0000) >> 16;
finalRed += r * kernel[i];
finalGreen += g * kernel[i];
finalBlue += b * kernel[i];
}
unsigned char finalRed_uc = roundf(finalRed);
unsigned char finalGreen_uc = roundf(finalGreen);
unsigned char finalBlue_uc = roundf(finalBlue);
unsigned int finalPixel = finalRed_uc
| (finalGreen_uc << 8)
| (finalBlue_uc << 16);
dst[row * w + col] = finalPixel;
}
__global__ void ConvolveVGPUGMem(unsigned int *dst, const unsigned int *src, const float *kernel, int kernelSize, int w, int h)
{
    // Vertical pass of a separable 1-D convolution over a packed 8-bit RGB(A)
    // image (mirror of ConvolveHGPUGMem, sampling along the column instead of
    // the row). One thread per output pixel; border samples use replicate
    // (clamp) padding.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // The launch grid is rounded up to whole blocks (divUp), so threads past the
    // image edge must bail out — the original code read and wrote out of bounds
    // for images whose dimensions are not multiples of the block size.
    if (row >= h || col >= w)
        return;

    float finalRed   = 0.0f;
    float finalGreen = 0.0f;
    float finalBlue  = 0.0f;

    for (int i = 0; i < kernelSize; i++)
    {
        // Sample position for tap i, centered on this pixel.
        int py = row + (i - kernelSize/2);
        // Clamp to [0, h-1] (replicate border)
        py = MIN(py, h-1);
        py = MAX(py, 0);

        unsigned int pixel = src[py * w + col];
        unsigned char r = pixel & 0x000000ff;
        unsigned char g = (pixel & 0x0000ff00) >> 8;
        unsigned char b = (pixel & 0x00ff0000) >> 16;
        finalRed   += r * kernel[i];
        finalGreen += g * kernel[i];
        finalBlue  += b * kernel[i];
    }

    // Round each accumulated channel back to 8 bits and repack (alpha byte
    // stays zero; see the note in ConvolveHGPUGMem about weight ranges).
    unsigned char finalRed_uc   = roundf(finalRed);
    unsigned char finalGreen_uc = roundf(finalGreen);
    unsigned char finalBlue_uc  = roundf(finalBlue);

    unsigned int finalPixel = finalRed_uc
                            | (finalGreen_uc << 8)
                            | (finalBlue_uc  << 16);
    dst[row * w + col] = finalPixel;
}
// Applies a separable 1-D filter to srcImg in two GPU passes (horizontal then
// vertical), leaving the filtered result in destImg.data.
//
// SIDE EFFECT: after the horizontal pass the data pointers of srcImg and
// destImg are swapped (so the H-pass output becomes the V-pass input without an
// extra buffer). Both structs are taken by reference, so on return the caller's
// srcImg.data and destImg.data point at each other's original buffers.
//
// Preconditions: srcImg and destImg are device allocations of identical
// dimensions; kernel is a device pointer to kernelSize float taps.
// Exits the process on any CUDA launch error.
void ApplyFilterGPUGMem(PPMImage &destImg, PPMImage &srcImg, const float * kernel, unsigned int kernelSize)
{
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Grid rounded up to cover the whole image (divUp), so the kernels may
    // launch threads past the image edge.
    dim3 dimGrid(divUp(srcImg.width,BLOCK_SIZE),divUp(srcImg.height,BLOCK_SIZE));
    // Pass 1: horizontal convolution, src -> dest.
    ConvolveHGPUGMem<<<dimGrid, dimBlock>>>(destImg.data, srcImg.data, kernel, kernelSize, srcImg.width, srcImg.height);
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Swap the buffers so the H-pass result feeds the vertical pass; this
    // mutation is visible to the caller (see function comment above).
    unsigned int * bk;
    bk = srcImg.data;
    srcImg.data = destImg.data;
    destImg.data =bk;
    // Pass 2: vertical convolution; same stream, so it is ordered after pass 1
    // without an explicit synchronize.
    ConvolveVGPUGMem<<<dimGrid, dimBlock>>>(destImg.data, srcImg.data, kernel, kernelSize, srcImg.width, srcImg.height);
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // NOTE(review): debug print left in; the second kernel is still running
    // asynchronously when this returns (no cudaDeviceSynchronize here).
    printf("done\n");
}
|
f7684536d12a0bf0d97a1bf4b4126f13f3bf4220.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SteerForAlignmentCUDA.cuh"
#include "CUDAKernelGlobals.cuh"
#include "FlockingCommon.cuh"
extern "C"
{
__host__ void SteerForAlignmentKernelBindTextures( float4 const* pdBPosition,
float4 const* pdBDirection,
uint const numB
);
__host__ void SteerForAlignmentKernelUnindTextures( void );
__global__ void SteerForAlignmentCUDAKernel( float4 const* pdPosition,
float4 const* pdDirection,
float4 * pdSteering,
size_t const numA,
uint const* pdKNNIndices,
size_t const k,
uint const numB,
float const minDistance,
float const maxDistance,
float const cosMaxAngle,
float const fWeight,
uint * pdAppliedKernels,
uint const doNotApplyWith
);
}
texture< float4, hipTextureType1D, hipReadModeElementType> texBPosition;
texture< float4, hipTextureType1D, hipReadModeElementType> texBDirection;
// Binds the device arrays of neighbor ("B") positions and directions to the
// module-scope texture references so SteerForAlignmentCUDAKernel can fetch
// them through the texture cache. Uses the legacy texture-reference API
// (hipBindTexture); numB is the element count, not a byte count.
// Must be paired with SteerForAlignmentKernelUnindTextures after the launch.
__host__ void SteerForAlignmentKernelBindTextures( float4 const* pdBPosition,
                                                   float4 const* pdBDirection,
                                                   uint const numB
                                                   )
{
    // Channel descriptor is the same for every call, so compute it once.
    static hipChannelFormatDesc const float4ChannelDesc = hipCreateChannelDesc< float4 >();
    CUDA_SAFE_CALL( hipBindTexture( NULL, texBPosition, pdBPosition, float4ChannelDesc, numB * sizeof(float4) ) );
    CUDA_SAFE_CALL( hipBindTexture( NULL, texBDirection, pdBDirection, float4ChannelDesc, numB * sizeof(float4) ) );
}
// Unbinds the two texture references bound by SteerForAlignmentKernelBindTextures.
// NOTE(review): "Unind" is a typo for "Unbind", but the name is part of the
// extern "C" interface (declared above and presumably in a header), so it
// cannot be renamed here without touching callers.
__host__ void SteerForAlignmentKernelUnindTextures( void )
{
    CUDA_SAFE_CALL( hipUnbindTexture( texBPosition ) );
    CUDA_SAFE_CALL( hipUnbindTexture( texBDirection ) );
}
// Computes an alignment steering force for each of numA agents: the average
// direction of its K nearest neighbors (restricted to a distance/angle
// neighborhood), relative to the agent's own direction.
// One thread per agent. Neighbor positions/directions are read through the
// texBPosition/texBDirection textures bound beforehand.
// Dynamic shared memory must hold k * blockDim.x uints (the KNN index tile).
__global__ void SteerForAlignmentCUDAKernel( float4 const* pdPosition,
                                             float4 const* pdDirection,
                                             float4 * pdSteering,
                                             size_t const numA,
                                             uint const* pdKNNIndices,
                                             size_t const k,
                                             uint const numB,
                                             float const minDistance,
                                             float const maxDistance,
                                             float const cosMaxAngle,
                                             float const fWeight,
                                             uint * pdAppliedKernels,
                                             uint const doNotApplyWith
                                             )
{
    int const index = (blockIdx.x * blockDim.x) + threadIdx.x;

    // Guard against the rounded-up grid.
    if( index >= numA )
        return;

    // Skip agents already handled by a steering kernel we must not combine with.
    if( pdAppliedKernels[ index ] & doNotApplyWith )
        return;

    extern __shared__ uint shKNNIndices[];

    // Per-thread staging of this agent's state; accessed below through the
    // *_SH macros (defined in CUDAKernelGlobals.cuh, which presumably map
    // STEERING_SH(i) -> shSteering[i] etc. — the arrays look unused otherwise).
    __shared__ float3 shSteering[THREADSPERBLOCK];
    __shared__ float3 shPosition[THREADSPERBLOCK];
    __shared__ float3 shDirection[THREADSPERBLOCK];

    // Copy required from global memory.
    STEERING_SH( threadIdx.x ) = STEERING_F3( index );
    POSITION_SH( threadIdx.x ) = POSITION_F3( index );
    DIRECTION_SH( threadIdx.x ) = DIRECTION_F3( index );

    // KNN indices are stored transposed (neighbor-major, stride numA in global
    // and THREADSPERBLOCK in shared) — see the commented-out row-major form.
    for( int i = 0; i < k; i++ )
        /*shKNNIndices[threadIdx.x*k + i] = pdKNNIndices[index*k + i];*/
        shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ] = pdKNNIndices[ index + i * numA ];
    __syncthreads();

    // steering accumulator and count of neighbors, both initially zero
    float3 steering = { 0.f, 0.f, 0.f };
    uint neighbors = 0;

    // For each agent in this agent's KNN neighborhood...
    for( uint i = 0; i < k; i++ )
    {
        //uint BIndex = shKNNIndices[threadIdx.x * k + i];
        uint BIndex = shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ];

        // Check for end of KNN: an index >= numB is the end-of-list sentinel.
        if( BIndex >= numB )
            break;

        float3 const bPosition = make_float3( tex1Dfetch( texBPosition, BIndex ) );
        float3 const bDirection = make_float3( tex1Dfetch( texBDirection, BIndex ) );

        // Only count neighbors inside the [minDistance, maxDistance] shell and
        // within the forward cone defined by cosMaxAngle.
        if( inBoidNeighborhood( POSITION_SH( threadIdx.x ), DIRECTION_SH( threadIdx.x ), bPosition, minDistance, maxDistance, cosMaxAngle ) )
        {
            // accumulate sum of neighbor's positions
            steering = float3_add( steering, bDirection );

            // count neighbors
            neighbors++;
        }
    }

    // Mean neighbor direction minus own direction, normalized to a unit force.
    if( neighbors > 0 )
        steering = float3_normalize( float3_subtract( float3_scalar_divide( steering, (float)neighbors ), DIRECTION_SH( threadIdx.x ) ) );

    // Apply the weight.
    steering = float3_scalar_multiply( steering, fWeight );

    // Set the applied kernel bit so later kernels can see alignment contributed.
    if( ! float3_equals( steering, float3_zero() ) )
        pdAppliedKernels[ index ] |= KERNEL_ALIGNMENT_BIT;

    // Add into the steering vector.
    STEERING_SH( threadIdx.x ) = float3_add( steering, STEERING_SH( threadIdx.x ) );

    // Write back to global memory.
    STEERING( index ) = STEERING_SH_F4( threadIdx.x );
}
| f7684536d12a0bf0d97a1bf4b4126f13f3bf4220.cu | #include "SteerForAlignmentCUDA.cuh"
#include "CUDAKernelGlobals.cuh"
#include "FlockingCommon.cuh"
extern "C"
{
__host__ void SteerForAlignmentKernelBindTextures( float4 const* pdBPosition,
float4 const* pdBDirection,
uint const numB
);
__host__ void SteerForAlignmentKernelUnindTextures( void );
__global__ void SteerForAlignmentCUDAKernel( float4 const* pdPosition,
float4 const* pdDirection,
float4 * pdSteering,
size_t const numA,
uint const* pdKNNIndices,
size_t const k,
uint const numB,
float const minDistance,
float const maxDistance,
float const cosMaxAngle,
float const fWeight,
uint * pdAppliedKernels,
uint const doNotApplyWith
);
}
texture< float4, cudaTextureType1D, cudaReadModeElementType> texBPosition;
texture< float4, cudaTextureType1D, cudaReadModeElementType> texBDirection;
// Binds the device arrays of neighbor ("B") positions and directions to the
// module-scope texture references so SteerForAlignmentCUDAKernel can fetch
// them through the texture cache. Uses the legacy texture-reference API
// (cudaBindTexture); numB is the element count, not a byte count.
// Must be paired with SteerForAlignmentKernelUnindTextures after the launch.
__host__ void SteerForAlignmentKernelBindTextures( float4 const* pdBPosition,
                                                   float4 const* pdBDirection,
                                                   uint const numB
                                                   )
{
    // Channel descriptor is the same for every call, so compute it once.
    static cudaChannelFormatDesc const float4ChannelDesc = cudaCreateChannelDesc< float4 >();
    CUDA_SAFE_CALL( cudaBindTexture( NULL, texBPosition, pdBPosition, float4ChannelDesc, numB * sizeof(float4) ) );
    CUDA_SAFE_CALL( cudaBindTexture( NULL, texBDirection, pdBDirection, float4ChannelDesc, numB * sizeof(float4) ) );
}
// Unbinds the two texture references bound by SteerForAlignmentKernelBindTextures.
// NOTE(review): "Unind" is a typo for "Unbind", but the name is part of the
// extern "C" interface, so it cannot be renamed here without touching callers.
__host__ void SteerForAlignmentKernelUnindTextures( void )
{
    CUDA_SAFE_CALL( cudaUnbindTexture( texBPosition ) );
    CUDA_SAFE_CALL( cudaUnbindTexture( texBDirection ) );
}
// Computes an alignment steering force for each of numA agents: the average
// direction of its K nearest neighbors (restricted to a distance/angle
// neighborhood), relative to the agent's own direction.
// One thread per agent. Neighbor positions/directions are read through the
// texBPosition/texBDirection textures bound beforehand.
// Dynamic shared memory must hold k * blockDim.x uints (the KNN index tile).
__global__ void SteerForAlignmentCUDAKernel( float4 const* pdPosition,
                                             float4 const* pdDirection,
                                             float4 * pdSteering,
                                             size_t const numA,
                                             uint const* pdKNNIndices,
                                             size_t const k,
                                             uint const numB,
                                             float const minDistance,
                                             float const maxDistance,
                                             float const cosMaxAngle,
                                             float const fWeight,
                                             uint * pdAppliedKernels,
                                             uint const doNotApplyWith
                                             )
{
    int const index = (blockIdx.x * blockDim.x) + threadIdx.x;

    // Guard against the rounded-up grid.
    if( index >= numA )
        return;

    // Skip agents already handled by a steering kernel we must not combine with.
    if( pdAppliedKernels[ index ] & doNotApplyWith )
        return;

    extern __shared__ uint shKNNIndices[];

    // Per-thread staging of this agent's state; accessed below through the
    // *_SH macros (defined in CUDAKernelGlobals.cuh, which presumably map
    // STEERING_SH(i) -> shSteering[i] etc. — the arrays look unused otherwise).
    __shared__ float3 shSteering[THREADSPERBLOCK];
    __shared__ float3 shPosition[THREADSPERBLOCK];
    __shared__ float3 shDirection[THREADSPERBLOCK];

    // Copy required from global memory.
    STEERING_SH( threadIdx.x ) = STEERING_F3( index );
    POSITION_SH( threadIdx.x ) = POSITION_F3( index );
    DIRECTION_SH( threadIdx.x ) = DIRECTION_F3( index );

    // KNN indices are stored transposed (neighbor-major, stride numA in global
    // and THREADSPERBLOCK in shared) — see the commented-out row-major form.
    for( int i = 0; i < k; i++ )
        /*shKNNIndices[threadIdx.x*k + i] = pdKNNIndices[index*k + i];*/
        shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ] = pdKNNIndices[ index + i * numA ];
    __syncthreads();

    // steering accumulator and count of neighbors, both initially zero
    float3 steering = { 0.f, 0.f, 0.f };
    uint neighbors = 0;

    // For each agent in this agent's KNN neighborhood...
    for( uint i = 0; i < k; i++ )
    {
        //uint BIndex = shKNNIndices[threadIdx.x * k + i];
        uint BIndex = shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ];

        // Check for end of KNN: an index >= numB is the end-of-list sentinel.
        if( BIndex >= numB )
            break;

        float3 const bPosition = make_float3( tex1Dfetch( texBPosition, BIndex ) );
        float3 const bDirection = make_float3( tex1Dfetch( texBDirection, BIndex ) );

        // Only count neighbors inside the [minDistance, maxDistance] shell and
        // within the forward cone defined by cosMaxAngle.
        if( inBoidNeighborhood( POSITION_SH( threadIdx.x ), DIRECTION_SH( threadIdx.x ), bPosition, minDistance, maxDistance, cosMaxAngle ) )
        {
            // accumulate sum of neighbor's positions
            steering = float3_add( steering, bDirection );

            // count neighbors
            neighbors++;
        }
    }

    // Mean neighbor direction minus own direction, normalized to a unit force.
    if( neighbors > 0 )
        steering = float3_normalize( float3_subtract( float3_scalar_divide( steering, (float)neighbors ), DIRECTION_SH( threadIdx.x ) ) );

    // Apply the weight.
    steering = float3_scalar_multiply( steering, fWeight );

    // Set the applied kernel bit so later kernels can see alignment contributed.
    if( ! float3_equals( steering, float3_zero() ) )
        pdAppliedKernels[ index ] |= KERNEL_ALIGNMENT_BIT;

    // Add into the steering vector.
    STEERING_SH( threadIdx.x ) = float3_add( steering, STEERING_SH( threadIdx.x ) );

    // Write back to global memory.
    STEERING( index ) = STEERING_SH_F4( threadIdx.x );
}
|
c2cb078aadd69c9971d89090cbfba520397d992b.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//poison log likelihood for one observation
// Poisson log likelihood of one observed spike count y in a bin of width dt.
// Firing rate: fr = clamp( (softplus(g*x) + bias) * exp(sh), KC_MINN, KC_MAXN ),
// where log1p(exp(g*x)) is the softplus of the latent x scaled by gain g and
// sh is the (log-domain) spike-history effect for this bin.
// Returns y*log(fr*dt) - fr*dt - log(y!) (the lgamma term).
__device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE bias) {
    KC_FP_TYPE fr = KC_MAX(KC_MIN((log1p(KC_EXP(g*x))+bias)*exp(sh),KC_MAXN),KC_MINN);
    return y*(KC_LOG(fr)+KC_LOG(dt)) - dt*fr - KC_GAMMALN(y+1.0);
}
//sums up log likelihood of each trial given model parameters
// Reduces the NT per-trial log likelihoods in log_p_tr into the single scalar
// log_p[0]. Serial reduction done by thread 0 alone (launched as <<<1,1>>>),
// so it is correct but not parallel.
__global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= 1) {
        return;
    }
    log_p[0] = 0;
    for(int tr = 0; tr < NT; tr++) {
        log_p[0] += log_p_tr[tr];
    }
}
//averages log likelihood of each simulated path
// (one thread for each trial)
// For each trial, averages the likelihood over nSims simulated latent paths:
//   log_p_tr[idx] = log( (1/nSims) * sum_kk exp(log_p[kk*NT + idx]) )
// computed as a pivoted log-sum-exp for numerical stability.
// log_p is sample-major: entry (sample kk, trial idx) lives at kk*NT + idx.
// One thread per trial.
__global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < NT) {
        // NOTE(review): dead store — immediately overwritten with NaN below.
        log_p_tr[idx] = 0;
        KC_FP_TYPE trSum = 0;
        KC_FP_TYPE log_x = 0;
        // Seed with NaN; the loop below retries pivots until a finite answer
        // is produced, and the isnan() loop condition implements that retry.
        log_p_tr[idx] = KC_SQRT(-1.0);

        //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial
        // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods
        for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) {
            // Try sample ii as the pivot: trSum starts at 1 (= exp(log_x - log_x)
            // for the pivot term itself) and accumulates the other samples
            // relative to it.
            trSum = 1 ;
            log_x = log_p[ii*NT+idx];
            for(int kk = 0; kk < ii; kk++) {
                trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
            }
            for(int kk = ii+1; kk < nSims; kk++) {
                trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
            }
            // Accept this pivot only if the relative sum is finite and not
            // vanishingly small; otherwise try the next sample as pivot.
            if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) {
                log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum);
                break;
            }
        }
    }
}
//simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood
// Simulates one diffusion-to-bound latent path per trial and accumulates the
// Poisson log likelihood of that trial's observations along the path.
// Preconditions: xx holds i.i.d. zero-mean Gaussian increments (variance w2,
// generated on the host before launch); log_p has room for nSims*NT entries,
// this call filling column `sim` (entry sim*NT + idx per trial).
// One thread per trial. NOTE(review): the w2 parameter is unused here — the
// noise variance is already baked into xx.
__global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe, KC_FP_TYPE bias) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < NT ) {
        int trNum = idx;
        int T1 = trIdx[trNum];
        //xx contains zero mean Gaussian noise of variance \omega^2
        xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial
        int currIdx = sim*(NT)+idx;
        log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1],bias);
        for(int ii = T1+1; ii < trIdx[trNum+1];ii++) {
            //progates particle forward in time: x_ii = x_{ii-1} + drift b + noise,
            // clipped at the absorbing boundary 1.0 (sticky once reached)
            xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0);
            //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii]
            log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii],bias);
        }
    }
}
//Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters
// This estimation is made by Monte Carlo simulations from the model to integrate out latent variable
//args
// 0 = y (observations)
// 1 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y)
// 2 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 3 = spike history effect (same size as y)
// 4 = beta values
// 5 = w (variance of diffusion process)
// 6 = l_0 (starting lambda value)
// 7 = g (absorbing boundary effective height)
// 8 = dt (bin size in seconds)
// 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000)
// 10 = bias
//outputs (left-hand side)
// 0 = log p(y|\theta)
// 1 = log p(y|\theta) for each individual trial
// MEX entry point: Monte Carlo estimate of log p(y | theta) for the ramping
// (diffusion-to-bound) model, integrating out the latent path by simulation.
// Inputs/outputs documented in the header comment above. Fix vs. original:
// the hipRAND status variable was declared as `hiprandStatus_t hiprandStatus_t;`
// (a hipify artifact that shadows the type name); renamed to `curandStatus`
// to match the CUDA twin of this file.
// NOTE(review): mexErrMsgTxt long-jumps out, leaking the device allocations
// made before the error — preexisting behavior, unchanged here.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    hipError_t ce;

    //load up trial data
    unsigned int TT = kcGetArrayNumEl(prhs[0]);
    KC_FP_TYPE * y = kcGetArrayData(prhs[0]);

    int * trIdx = kcGetArrayDataInt(prhs[1]);
    unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
    int * betaIdx = kcGetArrayDataInt(prhs[2],TT);

    // load spike history effect
    KC_FP_TYPE * spe = kcGetArrayData(prhs[3]);

    //how many simulations to use to estimate log p(y|\theta)
    int trialsToSim = (int)mxGetScalar(prhs[9]);

    //load up parameters to simulate model
    if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) {
        mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!");
    }
    KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]);
    int numBetas = mxGetNumberOfElements(prhs[4]);
    KC_FP_TYPE * b_gpu;

    ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
    if(ce != hipSuccess) {
        mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) ");
        mexPrintf(hipGetErrorString(ce));
        mexPrintf(" (%d)\n", (int)ce);
    }
    checkCudaErrors(hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice));

    KC_FP_TYPE w = mxGetScalar(prhs[5]);
    KC_FP_TYPE l_0 = mxGetScalar(prhs[6]);
    KC_FP_TYPE g = mxGetScalar(prhs[7]);
    KC_FP_TYPE dt = mxGetScalar(prhs[8]);
    KC_FP_TYPE bias = mxGetScalar(prhs[10]);

    //setup CUDA variables + random number generator
    // randSize rounds TT up to an even count (some RNG normal generators
    // require an even number of outputs).
    int randSize = TT + (((TT)%2==0)?0:1);
    KC_FP_TYPE * xx;
    checkCudaErrors(hipMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE)));
    hiprandGenerator_t curandGen = 0;
    hiprandStatus_t curandStatus;
    curandStatus = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT);
    if(curandStatus != HIPRAND_STATUS_SUCCESS ) {
        mexPrintf("CURAND-1 error %d\n",(int)curandStatus);
        mexErrMsgTxt("CUDA errors");
    }

    // Seed from the wall clock (microsecond resolution) so repeated calls
    // produce different Monte Carlo draws.
    struct timeval now;
    gettimeofday(&now,NULL);
    unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
    curandStatus = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
    if(curandStatus != HIPRAND_STATUS_SUCCESS ) {
        mexPrintf("CURAND-2 error %d\n",(int)curandStatus);
        mexErrMsgTxt("CUDA errors");
    }

    // One thread per trial; ceil-div grids.
    // NOTE(review): blockSize of 2 severely underfills warps — presumably a
    // debug setting; left unchanged to preserve behavior.
    int blockSize = 2;
    int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1);
    int blockSizeT = 2;
    int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1);

    //allocates sspace on GPU for simulating the likelihood
    KC_FP_TYPE * log_p;        // per (sample, trial) path log likelihoods
    //KC_FP_TYPE * log_p_2;
    KC_FP_TYPE * log_p_tr;     // per-trial log-mean-exp over samples
    KC_FP_TYPE * sum_log_p;    // scalar total
    checkCudaErrors(hipMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim));
    //checkCudaErrors(hipMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim));
    checkCudaErrors(hipMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT));
    checkCudaErrors(hipMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1));

    // generate AR1 noise
    for(int kk = 0; kk < trialsToSim; kk++) {
        //generates zero mean Gaussian noise with correct variance
        curandStatus = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w));
        if(curandStatus != HIPRAND_STATUS_SUCCESS ) {
            mexPrintf("CURAND gen error %d\n",(int)curandStatus);
            mexErrMsgTxt("CUDA errors");
        }
        //checkCudaErrors(hipDeviceSynchronize());

        //calculate path + logP for sample kk of every trial
        hipLaunchKernelGGL(( kcSimGBPaths), dim3(nBlocks),dim3(blockSize), 0, 0, y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe,bias);

        ce = hipDeviceSynchronize();
        if(ce != hipSuccess) {
            mexPrintf("Error in simulating of kcSimGaussianBound.cu ");
            mexPrintf(hipGetErrorString(ce));
            mexPrintf(" (%d)\n", (int)ce);
            mexErrMsgTxt("CUDA errors");
        }
    }

    // log_p_2 = log_p;
    //average likelihood of each sampled path to get log p(y|\theta) for each trial
    hipLaunchKernelGGL(( kcSumGBlogpTr), dim3(nBlocksT),dim3(blockSizeT), 0, 0, log_p,log_p_tr,NT,trialsToSim);
    checkCudaErrors(hipDeviceSynchronize());

    //sums up log likelihood of each trial
    hipLaunchKernelGGL(( kcSumGBfinal), dim3(1),dim3(1), 0, 0, log_p_tr,sum_log_p,NT);
    checkCudaErrors(hipDeviceSynchronize());

    //copy back to host
    if(nlhs > 0) {
        plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
        checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost));
    }
    if(nlhs > 1) {
        plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
        checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost));
    }

    //free up CUDA variables
    checkCudaErrors(hiprandDestroyGenerator(curandGen));
    checkCudaErrors(hipFree(xx));
    checkCudaErrors(hipFree(b_gpu));
    checkCudaErrors(hipFree(log_p));
    checkCudaErrors(hipFree(log_p_tr));
    checkCudaErrors(hipFree(sum_log_p));
}
| c2cb078aadd69c9971d89090cbfba520397d992b.cu | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//poison log likelihood for one observation
// Poisson log likelihood of one observed spike count y in a bin of width dt.
// Firing rate: fr = clamp( (softplus(g*x) + bias) * exp(sh), KC_MINN, KC_MAXN ),
// where log1p(exp(g*x)) is the softplus of the latent x scaled by gain g and
// sh is the (log-domain) spike-history effect for this bin.
// Returns y*log(fr*dt) - fr*dt - log(y!) (the lgamma term).
__device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE bias) {
    KC_FP_TYPE fr = KC_MAX(KC_MIN((log1p(KC_EXP(g*x))+bias)*exp(sh),KC_MAXN),KC_MINN);
    return y*(KC_LOG(fr)+KC_LOG(dt)) - dt*fr - KC_GAMMALN(y+1.0);
}
//sums up log likelihood of each trial given model parameters
// Reduces the NT per-trial log likelihoods in log_p_tr into the single scalar
// log_p[0]. Serial reduction done by thread 0 alone (launched as <<<1,1>>>),
// so it is correct but not parallel.
__global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= 1) {
        return;
    }
    log_p[0] = 0;
    for(int tr = 0; tr < NT; tr++) {
        log_p[0] += log_p_tr[tr];
    }
}
//averages log likelihood of each simulated path
// (one thread for each trial)
// For each trial, averages the likelihood over nSims simulated latent paths:
//   log_p_tr[idx] = log( (1/nSims) * sum_kk exp(log_p[kk*NT + idx]) )
// computed as a pivoted log-sum-exp for numerical stability.
// log_p is sample-major: entry (sample kk, trial idx) lives at kk*NT + idx.
// One thread per trial.
__global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < NT) {
        // NOTE(review): dead store — immediately overwritten with NaN below.
        log_p_tr[idx] = 0;
        KC_FP_TYPE trSum = 0;
        KC_FP_TYPE log_x = 0;
        // Seed with NaN; the loop below retries pivots until a finite answer
        // is produced, and the isnan() loop condition implements that retry.
        log_p_tr[idx] = KC_SQRT(-1.0);

        //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial
        // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods
        for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) {
            // Try sample ii as the pivot: trSum starts at 1 (= exp(log_x - log_x)
            // for the pivot term itself) and accumulates the other samples
            // relative to it.
            trSum = 1 ;
            log_x = log_p[ii*NT+idx];
            for(int kk = 0; kk < ii; kk++) {
                trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
            }
            for(int kk = ii+1; kk < nSims; kk++) {
                trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
            }
            // Accept this pivot only if the relative sum is finite and not
            // vanishingly small; otherwise try the next sample as pivot.
            if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) {
                log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum);
                break;
            }
        }
    }
}
//simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood
// Simulates one diffusion-to-bound latent path per trial and accumulates the
// Poisson log likelihood of that trial's observations along the path.
// Preconditions: xx holds i.i.d. zero-mean Gaussian increments (variance w2,
// generated on the host before launch); log_p has room for nSims*NT entries,
// this call filling column `sim` (entry sim*NT + idx per trial).
// One thread per trial. NOTE(review): the w2 parameter is unused here — the
// noise variance is already baked into xx.
__global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe, KC_FP_TYPE bias) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < NT ) {
        int trNum = idx;
        int T1 = trIdx[trNum];
        //xx contains zero mean Gaussian noise of variance \omega^2
        xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial
        int currIdx = sim*(NT)+idx;
        log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1],bias);
        for(int ii = T1+1; ii < trIdx[trNum+1];ii++) {
            //progates particle forward in time: x_ii = x_{ii-1} + drift b + noise,
            // clipped at the absorbing boundary 1.0 (sticky once reached)
            xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0);
            //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii]
            log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii],bias);
        }
    }
}
//Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters
// This estimation is made by Monte Carlo simulations from the model to integrate out latent variable
//args
// 0 = y (observations)
// 1 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y)
// 2 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 3 = spike history effect (same size as y)
// 4 = beta values
// 5 = w (variance of diffusion process)
// 6 = l_0 (starting lambda value)
// 7 = g (absorbing boundary effective height)
// 8 = dt (bin size in seconds)
// 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000)
// 10 = bias
//outputs (left-hand side)
// 0 = log p(y|\theta)
// 1 = log p(y|\theta) for each individual trial
// MEX entry point: Monte Carlo estimate of log p(y | theta) for the ramping
// (diffusion-to-bound) model, integrating out the latent path by simulation.
// Inputs/outputs documented in the header comment above.
// NOTE(review): mexErrMsgTxt long-jumps out, leaking the device allocations
// made before the error point.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    cudaError_t ce;

    //load up trial data
    unsigned int TT = kcGetArrayNumEl(prhs[0]);
    KC_FP_TYPE * y = kcGetArrayData(prhs[0]);

    int * trIdx = kcGetArrayDataInt(prhs[1]);
    unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
    int * betaIdx = kcGetArrayDataInt(prhs[2],TT);

    // load spike history effect
    KC_FP_TYPE * spe = kcGetArrayData(prhs[3]);

    //how many simulations to use to estimate log p(y|\theta)
    int trialsToSim = (int)mxGetScalar(prhs[9]);

    //load up parameters to simulate model
    if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) {
        mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!");
    }
    KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]);
    int numBetas = mxGetNumberOfElements(prhs[4]);
    KC_FP_TYPE * b_gpu;

    ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
    if(ce != cudaSuccess) {
        mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) ");
        mexPrintf(cudaGetErrorString(ce));
        mexPrintf(" (%d)\n", (int)ce);
    }
    checkCudaErrors(cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice));

    KC_FP_TYPE w = mxGetScalar(prhs[5]);
    KC_FP_TYPE l_0 = mxGetScalar(prhs[6]);
    KC_FP_TYPE g = mxGetScalar(prhs[7]);
    KC_FP_TYPE dt = mxGetScalar(prhs[8]);
    KC_FP_TYPE bias = mxGetScalar(prhs[10]);

    //setup CUDA variables + random number generator
    // randSize rounds TT up to an even count (some RNG normal generators
    // require an even number of outputs).
    int randSize = TT + (((TT)%2==0)?0:1);
    KC_FP_TYPE * xx;
    checkCudaErrors(cudaMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE)));
    curandGenerator_t curandGen = 0;
    curandStatus_t curandStatus;
    curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT);
    if(curandStatus != CURAND_STATUS_SUCCESS ) {
        mexPrintf("CURAND-1 error %d\n",(int)curandStatus);
        mexErrMsgTxt("CUDA errors");
    }

    // Seed from the wall clock (microsecond resolution) so repeated calls
    // produce different Monte Carlo draws.
    struct timeval now;
    gettimeofday(&now,NULL);
    unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
    curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
    if(curandStatus != CURAND_STATUS_SUCCESS ) {
        mexPrintf("CURAND-2 error %d\n",(int)curandStatus);
        mexErrMsgTxt("CUDA errors");
    }

    // One thread per trial; ceil-div grids.
    // NOTE(review): blockSize of 2 severely underfills warps — presumably a
    // debug setting.
    int blockSize = 2;
    int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1);
    int blockSizeT = 2;
    int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1);

    //allocates sspace on GPU for simulating the likelihood
    KC_FP_TYPE * log_p;        // per (sample, trial) path log likelihoods
    //KC_FP_TYPE * log_p_2;
    KC_FP_TYPE * log_p_tr;     // per-trial log-mean-exp over samples
    KC_FP_TYPE * sum_log_p;    // scalar total
    checkCudaErrors(cudaMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim));
    //checkCudaErrors(cudaMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim));
    checkCudaErrors(cudaMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT));
    checkCudaErrors(cudaMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1));

    // generate AR1 noise
    for(int kk = 0; kk < trialsToSim; kk++) {
        //generates zero mean Gaussian noise with correct variance
        curandStatus = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w));
        if(curandStatus != CURAND_STATUS_SUCCESS ) {
            mexPrintf("CURAND gen error %d\n",(int)curandStatus);
            mexErrMsgTxt("CUDA errors");
        }
        //checkCudaErrors(cudaDeviceSynchronize());

        //calculate path + logP for sample kk of every trial
        kcSimGBPaths<<<nBlocks,blockSize>>>(y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe,bias);

        ce = cudaDeviceSynchronize();
        if(ce != cudaSuccess) {
            mexPrintf("Error in simulating of kcSimGaussianBound.cu ");
            mexPrintf(cudaGetErrorString(ce));
            mexPrintf(" (%d)\n", (int)ce);
            mexErrMsgTxt("CUDA errors");
        }
    }

    // log_p_2 = log_p;
    //average likelihood of each sampled path to get log p(y|\theta) for each trial
    kcSumGBlogpTr<<<nBlocksT,blockSizeT>>>(log_p,log_p_tr,NT,trialsToSim);
    checkCudaErrors(cudaDeviceSynchronize());

    //sums up log likelihood of each trial
    kcSumGBfinal<<<1,1>>>(log_p_tr,sum_log_p,NT);
    checkCudaErrors(cudaDeviceSynchronize());

    //copy back to host
    if(nlhs > 0) {
        plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
        checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost));
    }
    if(nlhs > 1) {
        plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
        checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost));
    }

    //free up CUDA variables
    checkCudaErrors(curandDestroyGenerator(curandGen));
    checkCudaErrors(cudaFree(xx));
    checkCudaErrors(cudaFree(b_gpu));
    checkCudaErrors(cudaFree(log_p));
    checkCudaErrors(cudaFree(log_p_tr));
    checkCudaErrors(cudaFree(sum_log_p));
}
|
9450a9f19d888aa9df044ae39e652b2e9bcd981f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Element-wise base-2 exponential over an sd-by-fd matrix tile:
// b(i,j) = exp2(a(i,j)), where entry (i,j) of each matrix is stored at
// offset + i + j*ld. One thread per entry; out-of-range threads return early.
__global__ void ge_exp2 (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= sd || j >= fd) {
        return;
    }
    b[offset_b + i + j * ld_b] = CAST(exp2)(a[offset_a + i + j * ld_a]);
}
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Element-wise base-2 exponential over an sd-by-fd matrix tile:
// b(i,j) = exp2(a(i,j)), where entry (i,j) of each matrix is stored at
// offset + i + j*ld. One thread per entry; out-of-range threads return early.
__global__ void ge_exp2 (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= sd || j >= fd) {
        return;
    }
    b[offset_b + i + j * ld_b] = CAST(exp2)(a[offset_a + i + j * ld_a]);
}
af40598e932f7a05c7eaf33a29ce5e83dba650c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <math.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include "softmax.h"
#include "dropout.h"
// symbol to be automatically resolved by PyTorch libs
extern THCState *state;
namespace multihead_attn {
namespace fused_softmax {
namespace additive_mask_softmax_dropout {
// Forward pass of fused (additive-mask) softmax + dropout over attention
// scores. input is (attn_batches, q_seq_len, k_seq_len) half-precision scores
// with attn_batches = sequences * heads (shapes taken from input.size()).
// Softmax runs over the last dimension; pad_mask (if non-null) is added per
// sequence before the softmax. Dropout is applied only in training mode.
// Returns {dropout_results, dropout_mask, softmax_results}.
// NOTE(review): when is_training is false, dropout_results/dropout_mask are
// returned as uninitialized torch::empty tensors — presumably callers only
// read softmax_results in eval mode; confirm.
std::vector<torch::Tensor> fwd_cuda(
                               bool                 is_training,
                               int                  heads,
                               torch::Tensor const& input,
                               const half*          pad_mask,
                               float                dropout_prob
                                                  )
{
    const int   attn_batches   = input.size(0);
    const int   sequences      = attn_batches / heads;
    const int   q_seq_len      = input.size(1);
    const int   k_seq_len      = q_seq_len;
    // NOTE(review): dropout_elems is computed but never used.
    const int   dropout_elems  = attn_batches * q_seq_len * k_seq_len;

    // There is no reason to use more than one stream as every kernel is
    // sequentially dependent
    hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
    hipStream_t   stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
    hipblasSetStream(handle, stream);

    // 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
    auto act_options  = input.options().requires_grad(false);
    auto mask_options = act_options.dtype(torch::kUInt8);

    torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
    torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
    torch::Tensor dropout_mask    = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);

    // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
    void* input_ptr           = static_cast<void*>(input.data_ptr());
    void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());

    // Padded Softmax: one softmax row per (batch, query) pair; the masked
    // variant adds pad_mask, broadcast over heads*q_seq_len rows per sequence.
    // NOTE(review): softmax_success is assigned but never checked.
    bool softmax_success = false;
    if (pad_mask == nullptr) {
        softmax_success = dispatch_softmax<half, half, float>(
                              reinterpret_cast<half*>(softmax_results_ptr),
                              reinterpret_cast<const half*>(input_ptr),
                              k_seq_len,
                              k_seq_len,
                              attn_batches*q_seq_len);
    } else {
        softmax_success = dispatch_additive_masked_softmax<half, half, float>(
                              reinterpret_cast<half*>(softmax_results_ptr),
                              reinterpret_cast<const half*>(input_ptr),
                              pad_mask,
                              k_seq_len,
                              k_seq_len,
                              attn_batches*q_seq_len,
                              attn_batches*q_seq_len/sequences);
    }

    if (is_training) {
        //use at:: function so that C++ version generates the same random mask as python version
        auto dropout_tuple = at::_fused_dropout(softmax_results, 1.0f-dropout_prob);
        dropout_results = std::get<0>(dropout_tuple);
        dropout_mask    = std::get<1>(dropout_tuple);
    }

    // Matmul2
    return {
        dropout_results,
        dropout_mask,
        softmax_results
    };
}
// Backward pass of the fused softmax + dropout: applies the saved dropout
// mask (scaled by 1/(1-p)) and the softmax Jacobian to output_grads.
// Runs fully in place on output_grads and returns it — callers must not
// reuse output_grads as anything else afterwards.
torch::Tensor bwd_cuda(
                               int heads,
                               torch::Tensor const& output_grads,
                               torch::Tensor const& softmax_results,
                               torch::Tensor const& dropout_mask,
                               float dropout_prob
                                                  )
{
    const int   attn_batches   = output_grads.size(0);
    const int   q_seq_len      = output_grads.size(1);
    const int   k_seq_len      = q_seq_len;
    // NOTE(review): dropout_elems is computed but never used.
    const int   dropout_elems  = attn_batches * q_seq_len * k_seq_len;

    // TODO: Streams can be used in Backprop but I haven't added more than one
    // in my first attempt to create the code
    hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
    hipStream_t   stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
    hipblasSetStream(handle, stream);

    // Output Tensor Allocations
    // torch::Tensor input_grads = torch::empty_like(output_grads);

    // Apply Dropout Mask and Scale by Dropout Probability
    // Softmax Grad: fused kernel reads and writes output_grads in place.
    dispatch_masked_scale_softmax_backward_stream<half, half, float,false>(
                             static_cast<half*>(output_grads.data_ptr()),
                             static_cast<half*>(output_grads.data_ptr()),
                             reinterpret_cast<half const*>(softmax_results.data_ptr()),
                             static_cast<uint8_t const*>(dropout_mask.data_ptr()),
                             1.0/(1.0-dropout_prob),
                             k_seq_len,
                             k_seq_len,
                             attn_batches*q_seq_len, stream);

    //backward pass is completely in-place
    return output_grads;
}
}
}
}
| af40598e932f7a05c7eaf33a29ce5e83dba650c8.cu | #include <vector>
#include <math.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "softmax.h"
#include "dropout.h"
// symbol to be automatically resolved by PyTorch libs
extern THCState *state;
namespace multihead_attn {
namespace fused_softmax {
namespace additive_mask_softmax_dropout {
std::vector<torch::Tensor> fwd_cuda(
bool is_training,
int heads,
torch::Tensor const& input,
const half* pad_mask,
float dropout_prob
)
{
const int attn_batches = input.size(0);
const int sequences = attn_batches / heads;
const int q_seq_len = input.size(1);
const int k_seq_len = q_seq_len;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = input.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* input_ptr = static_cast<void*>(input.data_ptr());
void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
// Padded Softmax
bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(input_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(input_ptr),
pad_mask,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
attn_batches*q_seq_len/sequences);
}
if (is_training) {
//use at:: function so that C++ version generates the same random mask as python version
auto dropout_tuple = at::_fused_dropout(softmax_results, 1.0f-dropout_prob);
dropout_results = std::get<0>(dropout_tuple);
dropout_mask = std::get<1>(dropout_tuple);
}
// Matmul2
return {
dropout_results,
dropout_mask,
softmax_results
};
}
torch::Tensor bwd_cuda(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& softmax_results,
torch::Tensor const& dropout_mask,
float dropout_prob
)
{
const int attn_batches = output_grads.size(0);
const int q_seq_len = output_grads.size(1);
const int k_seq_len = q_seq_len;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
// torch::Tensor input_grads = torch::empty_like(output_grads);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
dispatch_masked_scale_softmax_backward_stream<half, half, float,false>(
static_cast<half*>(output_grads.data_ptr()),
static_cast<half*>(output_grads.data_ptr()),
reinterpret_cast<half const*>(softmax_results.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len, stream);
//backward pass is completely in-place
return output_grads;
}
}
}
}
|
30f62ccf4c195cc0c6ba330e0834acecd41214d6.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2019 by Contributors
* @file array/cpu/array_index_select.cu
* @brief Array index select GPU implementation
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./array_index_select.cuh"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <DGLDeviceType XPU, typename DType, typename IdType>
NDArray IndexSelect(NDArray array, IdArray index) {
const int64_t arr_len = array->shape[0];
const int64_t len = index->shape[0];
int64_t num_feat = 1;
std::vector<int64_t> shape{len};
for (int d = 1; d < array->ndim; ++d) {
num_feat *= array->shape[d];
shape.emplace_back(array->shape[d]);
}
// use index->ctx for pinned array
NDArray ret = NDArray::Empty(shape, array->dtype, index->ctx);
if (len == 0 || arr_len * num_feat == 0) return ret;
DType* ret_data = static_cast<DType*>(ret->data);
const DType* array_data = static_cast<DType*>(cuda::GetDevicePointer(array));
const IdType* idx_data = static_cast<IdType*>(index->data);
hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
if (num_feat == 1) {
const int nt = cuda::FindNumThreads(len);
const int nb = (len + nt - 1) / nt;
CUDA_KERNEL_CALL(
IndexSelectSingleKernel, nb, nt, 0, stream, array_data, idx_data, len,
arr_len, ret_data);
} else {
dim3 block(256, 1);
while (static_cast<int64_t>(block.x) >= 2 * num_feat) {
block.x /= 2;
block.y *= 2;
}
const dim3 grid((len + block.y - 1) / block.y);
CUDA_KERNEL_CALL(
IndexSelectMultiKernel, grid, block, 0, stream, array_data, num_feat,
idx_data, len, arr_len, ret_data);
}
return ret;
}
template NDArray IndexSelect<kDGLCUDA, int32_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, int32_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, int64_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, int64_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, __half, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, __half, int64_t>(NDArray, IdArray);
#if BF16_ENABLED
template NDArray IndexSelect<kDGLCUDA, __nv_bfloat16, int32_t>(
NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, __nv_bfloat16, int64_t>(
NDArray, IdArray);
#endif // BF16_ENABLED
template NDArray IndexSelect<kDGLCUDA, float, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, float, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, double, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, double, int64_t>(NDArray, IdArray);
template <DGLDeviceType XPU, typename DType>
DType IndexSelect(NDArray array, int64_t index) {
auto device = runtime::DeviceAPI::Get(array->ctx);
DType ret = static_cast<DType>(0.0f);
device->CopyDataFromTo(
static_cast<DType*>(array->data) + index, 0, &ret, 0, sizeof(DType),
array->ctx, DGLContext{kDGLCPU, 0}, array->dtype);
return ret;
}
template int32_t IndexSelect<kDGLCUDA, int32_t>(NDArray array, int64_t index);
template int64_t IndexSelect<kDGLCUDA, int64_t>(NDArray array, int64_t index);
template uint32_t IndexSelect<kDGLCUDA, uint32_t>(NDArray array, int64_t index);
template uint64_t IndexSelect<kDGLCUDA, uint64_t>(NDArray array, int64_t index);
template __half IndexSelect<kDGLCUDA, __half>(NDArray array, int64_t index);
#if BF16_ENABLED
template __nv_bfloat16 IndexSelect<kDGLCUDA, __nv_bfloat16>(
NDArray array, int64_t index);
#endif // BF16_ENABLED
template float IndexSelect<kDGLCUDA, float>(NDArray array, int64_t index);
template double IndexSelect<kDGLCUDA, double>(NDArray array, int64_t index);
} // namespace impl
} // namespace aten
} // namespace dgl
| 30f62ccf4c195cc0c6ba330e0834acecd41214d6.cu | /**
* Copyright (c) 2019 by Contributors
* @file array/cpu/array_index_select.cu
* @brief Array index select GPU implementation
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./array_index_select.cuh"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <DGLDeviceType XPU, typename DType, typename IdType>
NDArray IndexSelect(NDArray array, IdArray index) {
const int64_t arr_len = array->shape[0];
const int64_t len = index->shape[0];
int64_t num_feat = 1;
std::vector<int64_t> shape{len};
for (int d = 1; d < array->ndim; ++d) {
num_feat *= array->shape[d];
shape.emplace_back(array->shape[d]);
}
// use index->ctx for pinned array
NDArray ret = NDArray::Empty(shape, array->dtype, index->ctx);
if (len == 0 || arr_len * num_feat == 0) return ret;
DType* ret_data = static_cast<DType*>(ret->data);
const DType* array_data = static_cast<DType*>(cuda::GetDevicePointer(array));
const IdType* idx_data = static_cast<IdType*>(index->data);
cudaStream_t stream = runtime::getCurrentCUDAStream();
if (num_feat == 1) {
const int nt = cuda::FindNumThreads(len);
const int nb = (len + nt - 1) / nt;
CUDA_KERNEL_CALL(
IndexSelectSingleKernel, nb, nt, 0, stream, array_data, idx_data, len,
arr_len, ret_data);
} else {
dim3 block(256, 1);
while (static_cast<int64_t>(block.x) >= 2 * num_feat) {
block.x /= 2;
block.y *= 2;
}
const dim3 grid((len + block.y - 1) / block.y);
CUDA_KERNEL_CALL(
IndexSelectMultiKernel, grid, block, 0, stream, array_data, num_feat,
idx_data, len, arr_len, ret_data);
}
return ret;
}
template NDArray IndexSelect<kDGLCUDA, int32_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, int32_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, int64_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, int64_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, __half, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, __half, int64_t>(NDArray, IdArray);
#if BF16_ENABLED
template NDArray IndexSelect<kDGLCUDA, __nv_bfloat16, int32_t>(
NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, __nv_bfloat16, int64_t>(
NDArray, IdArray);
#endif // BF16_ENABLED
template NDArray IndexSelect<kDGLCUDA, float, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, float, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, double, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDGLCUDA, double, int64_t>(NDArray, IdArray);
template <DGLDeviceType XPU, typename DType>
DType IndexSelect(NDArray array, int64_t index) {
auto device = runtime::DeviceAPI::Get(array->ctx);
DType ret = static_cast<DType>(0.0f);
device->CopyDataFromTo(
static_cast<DType*>(array->data) + index, 0, &ret, 0, sizeof(DType),
array->ctx, DGLContext{kDGLCPU, 0}, array->dtype);
return ret;
}
template int32_t IndexSelect<kDGLCUDA, int32_t>(NDArray array, int64_t index);
template int64_t IndexSelect<kDGLCUDA, int64_t>(NDArray array, int64_t index);
template uint32_t IndexSelect<kDGLCUDA, uint32_t>(NDArray array, int64_t index);
template uint64_t IndexSelect<kDGLCUDA, uint64_t>(NDArray array, int64_t index);
template __half IndexSelect<kDGLCUDA, __half>(NDArray array, int64_t index);
#if BF16_ENABLED
template __nv_bfloat16 IndexSelect<kDGLCUDA, __nv_bfloat16>(
NDArray array, int64_t index);
#endif // BF16_ENABLED
template float IndexSelect<kDGLCUDA, float>(NDArray array, int64_t index);
template double IndexSelect<kDGLCUDA, double>(NDArray array, int64_t index);
} // namespace impl
} // namespace aten
} // namespace dgl
|
ec7538b4eee9551961ce138b3527ad001eb478eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_helpers_hip.cuh"
rtDeclareVariable(optix::Ray, ray, rtCurrentRay, );
rtDeclareVariable(PerRayData_radiance, prd, rtPayload, );
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint, volume_width, ,);
rtDeclareVariable(uint, volume_height, ,);
rtDeclareVariable(uint, volume_depth, ,);
rtDeclareVariable(float, stepping_distance, ,);
rtDeclareVariable(float, ambient_light, ,);
rtTextureSampler<float, 3> volume_texture;
rtTextureSampler<float4, 1> transfer_fn_texture;
#define MAX_STEPS 50000
#define kassert( X ) if ( !(X) ) {\
return ;}
#define EPS 5.0f
static __device__ bool get_aabb_ray_intersection(float &tmin, float &tmax) {
optix::float3 orig = ray.origin;
optix::float3 dir = ray.direction;
optix::float3 invdir = 1/dir;
int sign[3];
sign[0] = (invdir.x < 0);
sign[1] = (invdir.y < 0);
sign[2] = (invdir.z < 0);
float tymin, tymax, tzmin, tzmax;
optix::float3 bounds[2] = {
optix::make_float3(0, 0, 0),
optix::make_float3(volume_width, volume_height, volume_depth)
};
tmin = (bounds[sign[0]].x - orig.x) * invdir.x;
tmax = (bounds[1-sign[0]].x - orig.x) * invdir.x;
tymin = (bounds[sign[1]].y - orig.y) * invdir.y;
tymax = (bounds[1-sign[1]].y - orig.y) * invdir.y;
if ((tmin > tymax) || (tymin > tmax))
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = (bounds[sign[2]].z - orig.z) * invdir.z;
tzmax = (bounds[1-sign[2]].z - orig.z) * invdir.z;
if ((tmin > tzmax) || (tzmin > tmax))
return false;
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return true;
}
RT_PROGRAM void check_intersection(int prim_index /*There's always 1 primitive*/) {
float tmin = 0.f, tmax = 0.f;
bool intersected = get_aabb_ray_intersection(tmin, tmax);
if (!intersected) {
// rtPrintf("Nope ");
rtThrow(RAY_MISSED_BB);
return;
}
if ((tmax - tmin) < EPS) {
return;
}
float n_steps = min((tmax - tmin)/stepping_distance, (float)MAX_STEPS);
prd.r = prd.g = prd.b = ambient_light;
for(float curr_t = tmin, steps = 0; curr_t < tmax && steps < n_steps; curr_t += stepping_distance, steps++) {
float3 point = ray.origin + curr_t*ray.direction;
{
int isovalue = (int) (tex3D(
volume_texture,
(int)(point.x + .5f),/// (float) volume_width),
(int)(point.y + .5f),/// (float) volume_height),
(int)(point.z + .5f )/// (float) volume_depth)
) * 255.99f);
optix::float4 color = tex1D(transfer_fn_texture, isovalue) / 255.99f;
prd.r += (1 - prd.alpha) * optix::getByIndex(color, 0)/n_steps;
prd.g += (1 - prd.alpha) * optix::getByIndex(color, 1)/n_steps;
prd.b += (1 - prd.alpha) * optix::getByIndex(color, 2)/n_steps;
prd.alpha += (1 - prd.alpha) * optix::getByIndex(color, 3)/n_steps;
if (prd.alpha > 1) {
// break;
}
}
}
prd.r *= 2;
prd.g *= 2;
prd.b *= 2;
prd.alpha *= 2;
}
| ec7538b4eee9551961ce138b3527ad001eb478eb.cu | #include "device_helpers.cuh"
rtDeclareVariable(optix::Ray, ray, rtCurrentRay, );
rtDeclareVariable(PerRayData_radiance, prd, rtPayload, );
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint, volume_width, ,);
rtDeclareVariable(uint, volume_height, ,);
rtDeclareVariable(uint, volume_depth, ,);
rtDeclareVariable(float, stepping_distance, ,);
rtDeclareVariable(float, ambient_light, ,);
rtTextureSampler<float, 3> volume_texture;
rtTextureSampler<float4, 1> transfer_fn_texture;
#define MAX_STEPS 50000
#define kassert( X ) if ( !(X) ) {\
return ;}
#define EPS 5.0f
static __device__ bool get_aabb_ray_intersection(float &tmin, float &tmax) {
optix::float3 orig = ray.origin;
optix::float3 dir = ray.direction;
optix::float3 invdir = 1/dir;
int sign[3];
sign[0] = (invdir.x < 0);
sign[1] = (invdir.y < 0);
sign[2] = (invdir.z < 0);
float tymin, tymax, tzmin, tzmax;
optix::float3 bounds[2] = {
optix::make_float3(0, 0, 0),
optix::make_float3(volume_width, volume_height, volume_depth)
};
tmin = (bounds[sign[0]].x - orig.x) * invdir.x;
tmax = (bounds[1-sign[0]].x - orig.x) * invdir.x;
tymin = (bounds[sign[1]].y - orig.y) * invdir.y;
tymax = (bounds[1-sign[1]].y - orig.y) * invdir.y;
if ((tmin > tymax) || (tymin > tmax))
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = (bounds[sign[2]].z - orig.z) * invdir.z;
tzmax = (bounds[1-sign[2]].z - orig.z) * invdir.z;
if ((tmin > tzmax) || (tzmin > tmax))
return false;
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return true;
}
RT_PROGRAM void check_intersection(int prim_index /*There's always 1 primitive*/) {
float tmin = 0.f, tmax = 0.f;
bool intersected = get_aabb_ray_intersection(tmin, tmax);
if (!intersected) {
// rtPrintf("Nope ");
rtThrow(RAY_MISSED_BB);
return;
}
if ((tmax - tmin) < EPS) {
return;
}
float n_steps = min((tmax - tmin)/stepping_distance, (float)MAX_STEPS);
prd.r = prd.g = prd.b = ambient_light;
for(float curr_t = tmin, steps = 0; curr_t < tmax && steps < n_steps; curr_t += stepping_distance, steps++) {
float3 point = ray.origin + curr_t*ray.direction;
{
int isovalue = (int) (tex3D(
volume_texture,
(int)(point.x + .5f),/// (float) volume_width),
(int)(point.y + .5f),/// (float) volume_height),
(int)(point.z + .5f )/// (float) volume_depth)
) * 255.99f);
optix::float4 color = tex1D(transfer_fn_texture, isovalue) / 255.99f;
prd.r += (1 - prd.alpha) * optix::getByIndex(color, 0)/n_steps;
prd.g += (1 - prd.alpha) * optix::getByIndex(color, 1)/n_steps;
prd.b += (1 - prd.alpha) * optix::getByIndex(color, 2)/n_steps;
prd.alpha += (1 - prd.alpha) * optix::getByIndex(color, 3)/n_steps;
if (prd.alpha > 1) {
// break;
}
}
}
prd.r *= 2;
prd.g *= 2;
prd.b *= 2;
prd.alpha *= 2;
}
|
d3c03466d866159dfe8cefdaf2053209eb699f70.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simulator.h"
#include "model.h"
#include "realm/runtime_impl.h"
#include "realm/cuda/cuda_module.h"
#include "cuda_helper.h"
typedef long long int coord_t;
typedef Realm::Point<1, coord_t> Point1;
typedef Realm::Rect<1, coord_t> Rect1;
Simulator::Simulator(const FFModel* model,
FFHandler _handler,
Memory _memory)
: memory(_memory), handler(_handler),
offset(0), warmup_times(5), repeat_times(10)
{
// Allocate simulator memory
Rect1 bounds(Point1(0), Point1(0));
std::vector<size_t> field_sizes;
field_sizes.push_back(model->config.simulator_work_space_size);
Realm::RegionInstance::create_instance(simulatorInst,
memory, bounds, field_sizes, 0, Realm::ProfilingRequestSet()).wait();
base_ptr = (char*)simulatorInst.pointer_untyped(0, sizeof(char));
capacity = model->config.simulator_work_space_size;
float inter_gpu_bandwidth = 20 * 1024 * 1024.0f; /* B/ms*/
float inter_node_bandwidth = 12 * 1024 * 1024.0f / model->config.numNodes; /* B/ms*/
float gpu_dram_bandwidth = 16 * 1024 * 1024.0f; /* B/ms*/
size_t max_num_tasks = 1024 * 1024;
hipEventCreate(&start_event);
hipEventCreate(&end_event);
conv2d_meta = new Conv2DMeta(handler);
linear_meta = new LinearMeta(handler, 4096);
pool2d_meta = new Pool2DMeta(handler);
ele_unary_meta = new ElementUnaryMeta(handler);
ele_binary_meta = new ElementBinaryMeta(handler);
int num_nodes = model->config.numNodes;
int gpus_per_node = model->config.workersPerNode;
total_num_devices = num_nodes * gpus_per_node;
// Create GPU compute device
for (int i = 0; i < num_nodes; i++)
for (int j = 0; j < gpus_per_node; j++) {
id_to_compute_device[i*gpus_per_node+j] = new Device(Device::DEVICE_GPU,
i, i*gpus_per_node+j);
}
// Create inter GPU comm devices:
for (int i = 0; i < total_num_devices; i++)
for (int j = 0; j < total_num_devices; j++) {
Device* src = id_to_compute_device[i];
Device* dst = id_to_compute_device[j];
if (src->node_id == dst->node_id && src != dst) {
int hash = i * total_num_devices + j;
ids_to_inter_gpu_comm_device[hash] = new Device(Device::DEVICE_COMM,
inter_gpu_bandwidth);
}
}
// Create gpu<->dram comm devices
for (int i = 0; i < total_num_devices; i++) {
id_to_gputodram_comm_device[i] = new Device(Device::DEVICE_COMM,
gpu_dram_bandwidth);
id_to_dramtogpu_comm_device[i] = new Device(Device::DEVICE_COMM,
gpu_dram_bandwidth);
}
// Create inter node comm devices
for (int i = 0; i < num_nodes; i++)
for (int j = 0; j < num_nodes; j++)
if (i != j) {
int hash = i * total_num_devices + j;
ids_to_inter_node_comm_device[hash] = new Device(Device::DEVICE_COMM,
inter_node_bandwidth);
}
// Initialize task manager
task_manager = new TaskManager(max_num_tasks);
}
Simulator::~Simulator(void)
{
simulatorInst.destroy();
}
__host__
void Simulator::strategy_search_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const FFModel* model = *((FFModel**) task->args);
Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine())
.only_kind(Memory::GPU_FB_MEM).best_affinity_to(task->target_proc).first();
// Realm::MemoryImpl* memImpl =
// Realm::get_runtime()->get_memory_impl(gpu_mem);
// Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl;
// off_t offset = memFBImpl->alloc_bytes_local(model->config.simulator_work_space_size);
// void* base_ptr = memFBImpl->get_direct_ptr(offset, 0);
// Assume this task is running on GPU0
Simulator* simulator = new Simulator(model, model->handlers[0], gpu_mem);
// Set cublas/cudnn streams to allow Realm catch the events
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(simulator->handler.blas, stream));
checkCUDNN(cudnnSetStream(simulator->handler.dnn, stream));
#endif
std::map<Op*, ParallelConfig> strategies;
if (model->config.import_strategy_file.length() > 0) {
// Load the strategy from config.strategies
for (size_t l = 0; l < model->layers.size(); l++) {
MappingTagID key = FFConfig::get_hash_id(std::string(model->layers[l]->name));
std::map<MappingTagID, ParallelConfig>::const_iterator iter;
iter = model->config.strategies.find(key);
if (iter == model->config.strategies.end()) {
fprintf(stderr, "ERROR: Cannot find strategy for operator %s in "
"strategy file %s\n", model->layers[l]->name,
model->config.import_strategy_file.c_str());
}
strategies[model->layers[l]] = iter->second;
}
} else {
// Start from data parallel
for (size_t l = 0; l < model->layers.size(); l++) {
strategies[model->layers[l]] = model->layers[l]->get_data_parallel_config(*model);
}
}
model->optimize(simulator, strategies, model->config.search_budget, model->config.search_alpha);
if (model->config.export_strategy_file.length() > 0) {
fprintf(stderr, "Exporting the best discovered strategy to %s\n",
model->config.export_strategy_file.c_str());
std::map<Op*, ParallelConfig>::const_iterator iter;
std::map<std::string, ParallelConfig> strategy_output;
for (iter = strategies.begin(); iter != strategies.end(); iter++) {
strategy_output[iter->first->name] = iter->second;
}
save_strategies_to_file(model->config.export_strategy_file, strategy_output);
}
// Start from data
// memFBImpl->free_bytes_local(offset, model->config.simulator_work_space_size);
delete(simulator);
}
| d3c03466d866159dfe8cefdaf2053209eb699f70.cu | /* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "simulator.h"
#include "model.h"
#include "realm/runtime_impl.h"
#include "realm/cuda/cuda_module.h"
#include "cuda_helper.h"
typedef long long int coord_t;
typedef Realm::Point<1, coord_t> Point1;
typedef Realm::Rect<1, coord_t> Rect1;
Simulator::Simulator(const FFModel* model,
FFHandler _handler,
Memory _memory)
: memory(_memory), handler(_handler),
offset(0), warmup_times(5), repeat_times(10)
{
// Allocate simulator memory
Rect1 bounds(Point1(0), Point1(0));
std::vector<size_t> field_sizes;
field_sizes.push_back(model->config.simulator_work_space_size);
Realm::RegionInstance::create_instance(simulatorInst,
memory, bounds, field_sizes, 0, Realm::ProfilingRequestSet()).wait();
base_ptr = (char*)simulatorInst.pointer_untyped(0, sizeof(char));
capacity = model->config.simulator_work_space_size;
float inter_gpu_bandwidth = 20 * 1024 * 1024.0f; /* B/ms*/
float inter_node_bandwidth = 12 * 1024 * 1024.0f / model->config.numNodes; /* B/ms*/
float gpu_dram_bandwidth = 16 * 1024 * 1024.0f; /* B/ms*/
size_t max_num_tasks = 1024 * 1024;
cudaEventCreate(&start_event);
cudaEventCreate(&end_event);
conv2d_meta = new Conv2DMeta(handler);
linear_meta = new LinearMeta(handler, 4096);
pool2d_meta = new Pool2DMeta(handler);
ele_unary_meta = new ElementUnaryMeta(handler);
ele_binary_meta = new ElementBinaryMeta(handler);
int num_nodes = model->config.numNodes;
int gpus_per_node = model->config.workersPerNode;
total_num_devices = num_nodes * gpus_per_node;
// Create GPU compute device
for (int i = 0; i < num_nodes; i++)
for (int j = 0; j < gpus_per_node; j++) {
id_to_compute_device[i*gpus_per_node+j] = new Device(Device::DEVICE_GPU,
i, i*gpus_per_node+j);
}
// Create inter GPU comm devices:
for (int i = 0; i < total_num_devices; i++)
for (int j = 0; j < total_num_devices; j++) {
Device* src = id_to_compute_device[i];
Device* dst = id_to_compute_device[j];
if (src->node_id == dst->node_id && src != dst) {
int hash = i * total_num_devices + j;
ids_to_inter_gpu_comm_device[hash] = new Device(Device::DEVICE_COMM,
inter_gpu_bandwidth);
}
}
// Create gpu<->dram comm devices
for (int i = 0; i < total_num_devices; i++) {
id_to_gputodram_comm_device[i] = new Device(Device::DEVICE_COMM,
gpu_dram_bandwidth);
id_to_dramtogpu_comm_device[i] = new Device(Device::DEVICE_COMM,
gpu_dram_bandwidth);
}
// Create inter node comm devices
for (int i = 0; i < num_nodes; i++)
for (int j = 0; j < num_nodes; j++)
if (i != j) {
int hash = i * total_num_devices + j;
ids_to_inter_node_comm_device[hash] = new Device(Device::DEVICE_COMM,
inter_node_bandwidth);
}
// Initialize task manager
task_manager = new TaskManager(max_num_tasks);
}
Simulator::~Simulator(void)
{
simulatorInst.destroy();
}
__host__
void Simulator::strategy_search_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const FFModel* model = *((FFModel**) task->args);
Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine())
.only_kind(Memory::GPU_FB_MEM).best_affinity_to(task->target_proc).first();
// Realm::MemoryImpl* memImpl =
// Realm::get_runtime()->get_memory_impl(gpu_mem);
// Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl;
// off_t offset = memFBImpl->alloc_bytes_local(model->config.simulator_work_space_size);
// void* base_ptr = memFBImpl->get_direct_ptr(offset, 0);
// Assume this task is running on GPU0
Simulator* simulator = new Simulator(model, model->handlers[0], gpu_mem);
// Set cublas/cudnn streams to allow Realm catch the events
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(simulator->handler.blas, stream));
checkCUDNN(cudnnSetStream(simulator->handler.dnn, stream));
#endif
std::map<Op*, ParallelConfig> strategies;
if (model->config.import_strategy_file.length() > 0) {
// Load the strategy from config.strategies
for (size_t l = 0; l < model->layers.size(); l++) {
MappingTagID key = FFConfig::get_hash_id(std::string(model->layers[l]->name));
std::map<MappingTagID, ParallelConfig>::const_iterator iter;
iter = model->config.strategies.find(key);
if (iter == model->config.strategies.end()) {
fprintf(stderr, "ERROR: Cannot find strategy for operator %s in "
"strategy file %s\n", model->layers[l]->name,
model->config.import_strategy_file.c_str());
}
strategies[model->layers[l]] = iter->second;
}
} else {
// Start from data parallel
for (size_t l = 0; l < model->layers.size(); l++) {
strategies[model->layers[l]] = model->layers[l]->get_data_parallel_config(*model);
}
}
model->optimize(simulator, strategies, model->config.search_budget, model->config.search_alpha);
if (model->config.export_strategy_file.length() > 0) {
fprintf(stderr, "Exporting the best discovered strategy to %s\n",
model->config.export_strategy_file.c_str());
std::map<Op*, ParallelConfig>::const_iterator iter;
std::map<std::string, ParallelConfig> strategy_output;
for (iter = strategies.begin(); iter != strategies.end(); iter++) {
strategy_output[iter->first->name] = iter->second;
}
save_strategies_to_file(model->config.export_strategy_file, strategy_output);
}
// Start from data
// memFBImpl->free_bytes_local(offset, model->config.simulator_work_space_size);
delete(simulator);
}
|
692af0edd59b90e7953e67278e153f4431075453.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[64,64,1] --blockDim=[8,8,1]
#include "common_quantization.h"
__global__ void CUDAkernelQuantizationShort(short *SrcDst, int Stride)
{
__requires(Stride == 512);
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index (current coefficient)
int tx = threadIdx.x;
int ty = threadIdx.y;
//copy current coefficient to the local variable
short curCoef = SrcDst[(by * BLOCK_SIZE + ty) * Stride + (bx * BLOCK_SIZE + tx) ];
short curQuant = Q[ty * BLOCK_SIZE + tx];
//quantize the current coefficient
if (curCoef < 0)
{
curCoef = -curCoef;
curCoef += curQuant>>1;
curCoef /= curQuant;
curCoef = -curCoef;
}
else
{
curCoef += curQuant>>1;
curCoef /= curQuant;
}
__syncthreads();
curCoef = curCoef * curQuant;
//copy quantized coefficient back to the DCT-plane
SrcDst[(by * BLOCK_SIZE + ty) * Stride + (bx * BLOCK_SIZE + tx) ] = curCoef;
}
| 692af0edd59b90e7953e67278e153f4431075453.cu | //pass
//--gridDim=[64,64,1] --blockDim=[8,8,1]
#include "common_quantization.h"
__global__ void CUDAkernelQuantizationShort(short *SrcDst, int Stride)
{
__requires(Stride == 512);
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index (current coefficient)
int tx = threadIdx.x;
int ty = threadIdx.y;
//copy current coefficient to the local variable
short curCoef = SrcDst[(by * BLOCK_SIZE + ty) * Stride + (bx * BLOCK_SIZE + tx) ];
short curQuant = Q[ty * BLOCK_SIZE + tx];
//quantize the current coefficient
if (curCoef < 0)
{
curCoef = -curCoef;
curCoef += curQuant>>1;
curCoef /= curQuant;
curCoef = -curCoef;
}
else
{
curCoef += curQuant>>1;
curCoef /= curQuant;
}
__syncthreads();
curCoef = curCoef * curQuant;
//copy quantized coefficient back to the DCT-plane
SrcDst[(by * BLOCK_SIZE + ty) * Stride + (bx * BLOCK_SIZE + tx) ] = curCoef;
}
|
0d1051d5ccfdd57813f3b5dcb9fc675cdc80bf29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<cuda_runtime.h>
using std::cout;
using std::endl;
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
#define DIM 8192
struct hipComplex {
float r;
float i;
// hipComplex( float a, float b ) : r(a), i(b) {}
__device__ hipComplex(float a, float b) : r(a), i(b) {} // Fix error for calling host function from device
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i * a.i, i*a.r + r * a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i = 0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
// map from blockIdx to pixel position
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * DIM;
// now calculate the value at that position
if (x < DIM || y < DIM) {
int juliaValue = julia(x, y);
ptr[offset*3] = 0;
ptr[offset*3 + 1] = 0;
ptr[offset*3 + 2] = 255 * juliaValue;
}
}
void julia(cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int grayBytes = output.step * output.rows;
unsigned char *d_output;
std::cout << grayBytes << std::endl;
//Allocate device memory
SAFE_CALL(hipMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
//Specify a reasonable block size
const dim3 block(16, 16);
//Calculate grid size to cover the whole image
const dim3 grid((output.cols + block.x - 1) / block.x, (output.rows + block.y - 1) / block.y);
//Launch the color conversion kernel
hipLaunchKernelGGL(( kernel) , dim3(grid), dim3(block) , 0, 0, d_output);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed");
//Copy back data from destination device meory to OpenCV output image
SAFE_CALL(hipMemcpy(output.ptr(), d_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed");
//Free the device memory
SAFE_CALL(hipFree(d_output), "CUDA Free Failed");
}
int main(int argc, char** argv)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
if(argc<2){
std::cout << "Please Enter JPG Save File Location" << std::endl;
return -1;
}
//Create output image
cv::Mat output(DIM, DIM, CV_8UC3);
std::cout << output.cols << std::endl;
hipEventRecord(start,0);
//Call the wrapper function
julia(output);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
std::cout << "time to generate " << 1000 * elapsedTime << "us." << std::endl;
//Show the input and output
//cv::imshow("Input", input);
//cv::imshow("Output", output);
cv::imwrite(std::string(argv[1]), output);
//Wait for key press
cv::waitKey();
std::cin.get();
return 0;
}
| 0d1051d5ccfdd57813f3b5dcb9fc675cdc80bf29.cu | #include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<cuda_runtime.h>
using std::cout;
using std::endl;
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
#define DIM 8192
struct cuComplex {
float r;
float i;
// cuComplex( float a, float b ) : r(a), i(b) {}
__device__ cuComplex(float a, float b) : r(a), i(b) {} // Fix error for calling host function from device
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i * a.i, i*a.r + r * a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i = 0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
// map from blockIdx to pixel position
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * DIM;
// now calculate the value at that position
if (x < DIM || y < DIM) {
int juliaValue = julia(x, y);
ptr[offset*3] = 0;
ptr[offset*3 + 1] = 0;
ptr[offset*3 + 2] = 255 * juliaValue;
}
}
void julia(cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int grayBytes = output.step * output.rows;
unsigned char *d_output;
std::cout << grayBytes << std::endl;
//Allocate device memory
SAFE_CALL(cudaMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
//Specify a reasonable block size
const dim3 block(16, 16);
//Calculate grid size to cover the whole image
const dim3 grid((output.cols + block.x - 1) / block.x, (output.rows + block.y - 1) / block.y);
//Launch the color conversion kernel
kernel <<<grid, block >>>(d_output);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");
//Copy back data from destination device meory to OpenCV output image
SAFE_CALL(cudaMemcpy(output.ptr(), d_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed");
//Free the device memory
SAFE_CALL(cudaFree(d_output), "CUDA Free Failed");
}
int main(int argc, char** argv)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(argc<2){
std::cout << "Please Enter JPG Save File Location" << std::endl;
return -1;
}
//Create output image
cv::Mat output(DIM, DIM, CV_8UC3);
std::cout << output.cols << std::endl;
cudaEventRecord(start,0);
//Call the wrapper function
julia(output);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
std::cout << "time to generate " << 1000 * elapsedTime << "us." << std::endl;
//Show the input and output
//cv::imshow("Input", input);
//cv::imshow("Output", output);
cv::imwrite(std::string(argv[1]), output);
//Wait for key press
cv::waitKey();
std::cin.get();
return 0;
}
|
383962b0c966cb5e542f4d1ceb164eb1a90f3476.hip | // !!! This is a file automatically generated by hipify!!!
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki
// =============================================================================
//
// Implementation of FSI system that includes all subclasses for proximity and
// force calculation, and time integration.
//
// =============================================================================
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include "chrono_fsi/physics/ChSystemFsi_impl.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
namespace chrono {
namespace fsi {
struct sphTypeCompEqual {
__host__ __device__ bool operator()(const Real4& o1, const Real4& o2) { return o1.w == o2.w; }
};
//---------------------------------------------------------------------------------------
zipIterSphD SphMarkerDataD::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(),
tauXxYyZzD.begin(), tauXyXzYzD.begin()));
}
void SphMarkerDataD::resize(size_t s) {
posRadD.resize(s);
velMasD.resize(s);
rhoPresMuD.resize(s);
tauXxYyZzD.resize(s);
tauXyXzYzD.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterSphH SphMarkerDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), velMasH.begin(), rhoPresMuH.begin(),
tauXxYyZzH.begin(), tauXyXzYzH.begin()));
}
// resize
void SphMarkerDataH::resize(size_t s) {
posRadH.resize(s);
velMasH.resize(s);
rhoPresMuH.resize(s);
tauXxYyZzH.resize(s);
tauXyXzYzH.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterRigidD FsiBodiesDataD::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_D.begin(), velMassRigid_fsiBodies_D.begin(), accRigid_fsiBodies_D.begin(),
q_fsiBodies_D.begin(), omegaVelLRF_fsiBodies_D.begin(), omegaAccLRF_fsiBodies_D.begin()));
}
void FsiBodiesDataD::resize(size_t s) {
posRigid_fsiBodies_D.resize(s);
velMassRigid_fsiBodies_D.resize(s);
accRigid_fsiBodies_D.resize(s);
q_fsiBodies_D.resize(s);
omegaVelLRF_fsiBodies_D.resize(s);
omegaAccLRF_fsiBodies_D.resize(s);
}
void FsiShellsDataH::resize(size_t s) {
posFlex_fsiBodies_nA_H.resize(s);
posFlex_fsiBodies_nB_H.resize(s);
posFlex_fsiBodies_nC_H.resize(s);
posFlex_fsiBodies_nD_H.resize(s);
velFlex_fsiBodies_nA_H.resize(s);
velFlex_fsiBodies_nB_H.resize(s);
velFlex_fsiBodies_nC_H.resize(s);
velFlex_fsiBodies_nD_H.resize(s);
accFlex_fsiBodies_nA_H.resize(s);
accFlex_fsiBodies_nB_H.resize(s);
accFlex_fsiBodies_nC_H.resize(s);
accFlex_fsiBodies_nD_H.resize(s);
}
void FsiShellsDataD::resize(size_t s) {
posFlex_fsiBodies_nA_D.resize(s);
posFlex_fsiBodies_nB_D.resize(s);
posFlex_fsiBodies_nC_D.resize(s);
posFlex_fsiBodies_nD_D.resize(s);
velFlex_fsiBodies_nA_D.resize(s);
velFlex_fsiBodies_nB_D.resize(s);
velFlex_fsiBodies_nC_D.resize(s);
velFlex_fsiBodies_nD_D.resize(s);
accFlex_fsiBodies_nA_D.resize(s);
accFlex_fsiBodies_nB_D.resize(s);
accFlex_fsiBodies_nC_D.resize(s);
accFlex_fsiBodies_nD_D.resize(s);
}
void FsiMeshDataH::resize(size_t s) {
pos_fsi_fea_H.resize(s);
vel_fsi_fea_H.resize(s);
acc_fsi_fea_H.resize(s);
dir_fsi_fea_H.resize(s);
}
void FsiMeshDataD::resize(size_t s) {
pos_fsi_fea_D.resize(s);
vel_fsi_fea_D.resize(s);
acc_fsi_fea_D.resize(s);
dir_fsi_fea_D.resize(s);
}
void FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) {
thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(), posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(), accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(), q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(),
omegaAccLRF_fsiBodies_D.begin());
}
void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) {
thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(),
accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), other.accFlex_fsiBodies_nB_H.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(),
accFlex_fsiBodies_nD_D.begin());
}
void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) {
thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin());
thrust::copy(other.dir_fsi_fea_H.begin(), other.dir_fsi_fea_H.end(), dir_fsi_fea_D.begin());
}
FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(), posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(), accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(), q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(),
omegaAccLRF_fsiBodies_D.begin());
return *this;
}
FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), other.posFlex_fsiBodies_nC_D.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(),
accFlex_fsiBodies_nD_D.begin());
return *this;
}
FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin());
thrust::copy(other.dir_fsi_fea_D.begin(), other.dir_fsi_fea_D.end(), dir_fsi_fea_D.begin());
return *this;
}
//---------------------------------------------------------------------------------------
zipIterRigidH FsiBodiesDataH::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(),
q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), omegaAccLRF_fsiBodies_H.begin()));
}
void FsiBodiesDataH::resize(size_t s) {
posRigid_fsiBodies_H.resize(s);
velMassRigid_fsiBodies_H.resize(s);
accRigid_fsiBodies_H.resize(s);
q_fsiBodies_H.resize(s);
omegaVelLRF_fsiBodies_H.resize(s);
omegaAccLRF_fsiBodies_H.resize(s);
}
//---------------------------------------------------------------------------------------
void ProximityDataD::resize(size_t s) {
gridMarkerHashD.resize(s);
gridMarkerIndexD.resize(s);
mapOriginalToSorted.resize(s);
}
//---------------------------------------------------------------------------------------
ChronoBodiesDataH::ChronoBodiesDataH(size_t s) {
resize(s);
}
ChronoShellsDataH::ChronoShellsDataH(size_t s) {
resize(s);
}
ChronoMeshDataH::ChronoMeshDataH(size_t s) {
resize(s);
}
zipIterChronoBodiesH ChronoBodiesDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(pos_ChSystemH.begin(), vel_ChSystemH.begin(),
acc_ChSystemH.begin(), quat_ChSystemH.begin(),
omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin()));
}
void ChronoBodiesDataH::resize(size_t s) {
pos_ChSystemH.resize(s);
vel_ChSystemH.resize(s);
acc_ChSystemH.resize(s);
quat_ChSystemH.resize(s);
omegaVelGRF_ChSystemH.resize(s);
omegaAccGRF_ChSystemH.resize(s);
}
void ChronoShellsDataH::resize(size_t s) {
posFlex_ChSystemH_nA_H.resize(s);
posFlex_ChSystemH_nB_H.resize(s);
posFlex_ChSystemH_nC_H.resize(s);
posFlex_ChSystemH_nD_H.resize(s);
velFlex_ChSystemH_nA_H.resize(s);
velFlex_ChSystemH_nB_H.resize(s);
velFlex_ChSystemH_nC_H.resize(s);
velFlex_ChSystemH_nD_H.resize(s);
accFlex_ChSystemH_nA_H.resize(s);
accFlex_ChSystemH_nB_H.resize(s);
accFlex_ChSystemH_nC_H.resize(s);
accFlex_ChSystemH_nD_H.resize(s);
}
void ChronoMeshDataH::resize(size_t s) {
posFlex_ChSystemH_H.resize(s);
velFlex_ChSystemH_H.resize(s);
accFlex_ChSystemH_H.resize(s);
dirFlex_ChSystemH_H.resize(s);
}
//---------------------------------------------------------------------------------------
ChSystemFsi_impl::ChSystemFsi_impl(std::shared_ptr<SimParams> params) : paramsH(params) {
numObjects = chrono_types::make_shared<ChCounters>();
InitNumObjects();
sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>();
sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersH = chrono_types::make_shared<SphMarkerDataH>();
fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesH = chrono_types::make_shared<FsiBodiesDataH>();
fsiMeshD = chrono_types::make_shared<FsiMeshDataD>();
fsiMeshH = chrono_types::make_shared<FsiMeshDataH>();
fsiGeneralData = chrono_types::make_shared<FsiGeneralData>();
markersProximityD = chrono_types::make_shared<ProximityDataD>();
}
ChSystemFsi_impl::~ChSystemFsi_impl() {}
void ChSystemFsi_impl::AddSPHParticle(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) {
sphMarkersH->posRadH.push_back(pos);
sphMarkersH->velMasH.push_back(vel);
sphMarkersH->rhoPresMuH.push_back(rhoPresMu);
sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz);
sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz);
}
void ChSystemFsi_impl::ArrangeDataManager() {
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
dummyRhoPresMuH.clear();
}
void ChSystemFsi_impl::InitNumObjects() {
numObjects->numRigidBodies = 0; // Number of rigid bodies
numObjects->numFlexBodies1D = 0; // Number of 1D Flexible bodies
numObjects->numFlexBodies2D = 0; // Number of 2D Flexible bodies
numObjects->numFlexNodes = 0; // Number of FE nodes
numObjects->numGhostMarkers = 0; // Number of ghost particles
numObjects->numHelperMarkers = 0; // Number of helper particles
numObjects->numFluidMarkers = 0; // Number of fluid SPH particles
numObjects->numBoundaryMarkers = 0; // Number of boundary SPH particles
numObjects->startRigidMarkers = 0; // Start index of the rigid SPH particles
numObjects->startFlexMarkers = 0; // Start index of the flexible SPH particles
numObjects->numRigidMarkers = 0; // Number of rigid SPH particles
numObjects->numFlexMarkers = 0; // Number of flexible SPH particles
numObjects->numAllMarkers = 0; // Total number of SPH particles
}
void ChSystemFsi_impl::CalcNumObjects() {
InitNumObjects();
size_t rSize = fsiGeneralData->referenceArray.size();
for (size_t i = 0; i < rSize; i++) {
int4 rComp4 = fsiGeneralData->referenceArray[i];
int numMarkers = rComp4.y - rComp4.x;
switch (rComp4.z) {
case -3:
numObjects->numHelperMarkers += numMarkers;
break;
case -2:
numObjects->numGhostMarkers += numMarkers;
break;
case -1:
numObjects->numFluidMarkers += numMarkers;
break;
case 0:
numObjects->numBoundaryMarkers += numMarkers;
break;
case 1:
numObjects->numRigidMarkers += numMarkers;
numObjects->numRigidBodies++;
break;
case 2:
numObjects->numFlexMarkers += numMarkers;
numObjects->numFlexBodies1D++;
break;
case 3:
numObjects->numFlexMarkers += numMarkers;
numObjects->numFlexBodies2D++;
break;
default:
std::cerr << "ERROR (CalcNumObjects): particle type not defined." << std::endl;
throw std::runtime_error("Particle type not defined.");
break;
}
}
numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers;
numObjects->numAllMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers +
numObjects->numRigidMarkers + numObjects->numFlexMarkers;
numObjects->startRigidMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers;
numObjects->startFlexMarkers =
numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers;
}
void ChSystemFsi_impl::ConstructReferenceArray() {
auto numAllMarkers = sphMarkersH->rhoPresMuH.size();
thrust::host_vector<int> numComponentMarkers(numAllMarkers);
thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1);
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), dummyRhoPresMuH.begin());
size_t numberOfComponents =
(thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(),
dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual()))
.first -
dummyRhoPresMuH.begin();
dummyRhoPresMuH.resize(numberOfComponents);
numComponentMarkers.resize(numberOfComponents);
fsiGeneralData->referenceArray.clear();
fsiGeneralData->referenceArray_FEA.clear();
// Loop through all components loading referenceArray and referenceArray_FEA
int start_index = 0;
for (size_t i = 0; i < numberOfComponents; i++) {
int compType = (int)::floor(dummyRhoPresMuH[i].w + .1);
int phaseType = -1;
if (compType == -3) {
phaseType = -1; // For helper
} else if (compType == -2) {
phaseType = -1; // For ghost
} else if (compType == -1) {
phaseType = -1; // For fluid/granular
} else if (compType == 0) {
phaseType = 0; // For boundary
} else if (compType == 1) {
phaseType = 1; // For rigid
} else if (compType == 2) {
phaseType = 1; // For 1D cable elements
} else if (compType == 3) {
phaseType = 1; // For 2D shell elements
} else {
phaseType = 1;
}
auto new_entry = mI4(start_index, start_index + numComponentMarkers[i], compType, phaseType);
start_index += numComponentMarkers[i];
fsiGeneralData->referenceArray.push_back(new_entry);
if (compType == 2 || compType == 3)
fsiGeneralData->referenceArray_FEA.push_back(new_entry);
}
dummyRhoPresMuH.clear();
numComponentMarkers.clear();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChSystemFsi_impl::ResizeData(size_t numRigidBodies,
size_t numFlexBodies1D,
size_t numFlexBodies2D,
size_t numFlexNodes) {
ConstructReferenceArray();
CalcNumObjects();
if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
std::cerr << "ERROR (ResizeData): mismatch in total number of markers." << std::endl;
throw std::runtime_error("Mismatch in total number of markers.");
}
// Set number of interface objects
numObjects->numRigidBodies = numRigidBodies;
numObjects->numFlexBodies1D = numFlexBodies1D;
numObjects->numFlexBodies2D = numFlexBodies2D;
numObjects->numFlexNodes = numFlexNodes;
sphMarkersD1->resize(numObjects->numAllMarkers);
sphMarkersD2->resize(numObjects->numAllMarkers);
sortedSphMarkersD->resize(numObjects->numAllMarkers);
sphMarkersH->resize(numObjects->numAllMarkers);
markersProximityD->resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers);
fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers);
fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20));
fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20));
fsiGeneralData->activityIdentifierD.resize(numObjects->numAllMarkers, 1);
fsiGeneralData->extendedActivityIdD.resize(numObjects->numAllMarkers, 1);
fsiGeneralData->freeSurfaceIdD.resize(numObjects->numAllMarkers, 0);
thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin());
thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin());
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin());
thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin());
thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin());
thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin());
thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), sphMarkersD2->velMasD.begin());
thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin());
thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin());
thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin());
fsiBodiesD1->resize(numObjects->numRigidBodies);
fsiBodiesD2->resize(numObjects->numRigidBodies);
fsiBodiesH->resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigidMarkers);
fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigidMarkers);
fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlexMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlexMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlexMarkers);
fsiGeneralData->CableElementsNodesD.resize(fsiGeneralData->CableElementsNodesH.size());
fsiGeneralData->ShellElementsNodesD.resize(fsiGeneralData->ShellElementsNodesH.size());
thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(),
fsiGeneralData->CableElementsNodesD.begin());
thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(),
fsiGeneralData->ShellElementsNodesD.begin());
fsiMeshD->resize(numObjects->numFlexNodes);
fsiMeshH->resize(numObjects->numFlexNodes);
fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes);
}
//--------------------------------------------------------------------------------------------------------------------------------
struct scale_functor {
scale_functor(Real a) : m_a(a) {}
__host__ __device__ Real4 operator()(Real4& x) const { return m_a * x; }
const Real m_a;
};
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleAccelerations() {
const auto n = numObjects->numFluidMarkers;
// Copy data for SPH particles only
thrust::device_vector<Real4> accD(n);
thrust::copy_n(fsiGeneralData->derivVelRhoD.begin(), n, accD.begin());
return accD;
}
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces() {
thrust::device_vector<Real4> frcD = GetParticleAccelerations();
thrust::transform(frcD.begin(), frcD.end(), frcD.begin(), scale_functor(paramsH->markerMass));
return frcD;
}
//--------------------------------------------------------------------------------------------------------------------------------
struct in_box {
in_box() {}
__device__ bool operator()(const Real4 v) {
// Convert location in box frame
auto d = mR3(v) - pos;
auto w = mR3( //
ax.x * d.x + ax.y * d.y + ax.z * d.z, //
ay.x * d.x + ay.y * d.y + ay.z * d.z, //
az.x * d.x + az.y * d.y + az.z * d.z //
);
// Check w between all box limits
return (w.x >= -hsize.x && w.x <= +hsize.x) && (w.y >= -hsize.y && w.y <= +hsize.y) &&
(w.z >= -hsize.z && w.z <= +hsize.z);
}
Real3 hsize;
Real3 pos;
Real3 ax;
Real3 ay;
Real3 az;
};
thrust::device_vector<int> ChSystemFsi_impl::FindParticlesInBox(const Real3& hsize,
const Real3& pos,
const Real3& ax,
const Real3& ay,
const Real3& az) {
// Extract indices of SPH particles contained in the OBB
auto& ref = fsiGeneralData->referenceArray;
auto& pos_D = sphMarkersD2->posRadD;
// Find start and end locations for SPH particles (exclude ghost and BCE markers)
int haveHelper = (ref[0].z == -3) ? 1 : 0;
int haveGhost = (ref[0].z == -2 || ref[1].z == -2) ? 1 : 0;
auto sph_start = ref[haveHelper + haveGhost].x;
auto sph_end = ref[haveHelper + haveGhost].y;
auto num_sph = sph_end - sph_start;
// Preallocate output vector of indices
thrust::device_vector<int> indices_D(num_sph);
// Extract indices of SPH particles inside OBB
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last(num_sph);
in_box predicate;
predicate.hsize = hsize;
predicate.pos = pos;
predicate.ax = ax;
predicate.ay = ay;
predicate.az = az;
auto end = thrust::copy_if(thrust::device, // execution policy
first, last, // range of all particle indices
pos_D.begin(), // stencil vector
indices_D.begin(), // beginning of destination
predicate // predicate for stencil elements
);
// Trim the output vector of indices
size_t num_active = (size_t)(end - indices_D.begin());
indices_D.resize(num_active);
return indices_D;
}
// Gather positions from particles with specified indices
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticlePositions(const thrust::device_vector<int>& indices) {
const auto& allpos = sphMarkersD2->posRadD;
thrust::device_vector<Real4> pos(allpos.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allpos.begin(), // beginning of source
pos.begin() // beginning of destination
);
// Trim the output vector of particle positions
size_t num_active = (size_t)(end - pos.begin());
assert(num_active == indices.size());
pos.resize(num_active);
return pos;
}
// Gather velocities from particles with specified indices
thrust::device_vector<Real3> ChSystemFsi_impl::GetParticleVelocities(const thrust::device_vector<int>& indices) {
auto allvel = sphMarkersD2->velMasD;
thrust::device_vector<Real3> vel(allvel.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allvel.begin(), // beginning of source
vel.begin() // beginning of destination
);
// Trim the output vector of particle positions
size_t num_active = (size_t)(end - vel.begin());
assert(num_active == indices.size());
vel.resize(num_active);
return vel;
}
// Gather accelerations from particles with specified indices
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleAccelerations(const thrust::device_vector<int>& indices) {
auto allacc = GetParticleAccelerations();
thrust::device_vector<Real4> acc(allacc.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allacc.begin(), // beginning of source
acc.begin() // beginning of destination
);
// Trim the output vector of particle positions
size_t num_active = (size_t)(end - acc.begin());
assert(num_active == indices.size());
acc.resize(num_active);
return acc;
}
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces(const thrust::device_vector<int>& indices) {
auto allforces = GetParticleForces();
thrust::device_vector<Real4> forces(allforces.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allforces.begin(), // beginning of source
forces.begin() // beginning of destination
);
// Trim the output vector of particle positions
size_t num_active = (size_t)(end - forces.begin());
assert(num_active == indices.size());
forces.resize(num_active);
return forces;
}
} // end namespace fsi
} // end namespace chrono
| 383962b0c966cb5e542f4d1ceb164eb1a90f3476.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki
// =============================================================================
//
// Implementation of FSI system that includes all subclasses for proximity and
// force calculation, and time integration.
//
// =============================================================================
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include "chrono_fsi/physics/ChSystemFsi_impl.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
namespace chrono {
namespace fsi {
// Equality predicate on the marker-type code stored in the w component of a
// marker's rhoPresMu entry. Used by reduce_by_key in ConstructReferenceArray
// to detect runs of markers belonging to the same component.
struct sphTypeCompEqual {
    __host__ __device__ bool operator()(const Real4& a, const Real4& b) { return a.w == b.w; }
};
//---------------------------------------------------------------------------------------
// Zip iterator over all per-marker device arrays (position+radius,
// velocity+mass, rho/pressure/mu, and the two stress-component arrays).
// Tuple ordering must match the zipIterSphD typedef.
zipIterSphD SphMarkerDataD::iterator() {
    return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(),
                                                        tauXxYyZzD.begin(), tauXyXzYzD.begin()));
}
// Resize all device-side per-marker arrays to s markers.
void SphMarkerDataD::resize(size_t s) {
    posRadD.resize(s);
    velMasD.resize(s);
    rhoPresMuD.resize(s);
    tauXxYyZzD.resize(s);
    tauXyXzYzD.resize(s);
}
//---------------------------------------------------------------------------------------
// Zip iterator over all per-marker host arrays (mirror of SphMarkerDataD::iterator).
zipIterSphH SphMarkerDataH::iterator() {
    return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), velMasH.begin(), rhoPresMuH.begin(),
                                                        tauXxYyZzH.begin(), tauXyXzYzH.begin()));
}
// Resize all host-side per-marker arrays to s markers.
void SphMarkerDataH::resize(size_t s) {
    posRadH.resize(s);
    velMasH.resize(s);
    rhoPresMuH.resize(s);
    tauXxYyZzH.resize(s);
    tauXyXzYzH.resize(s);
}
//---------------------------------------------------------------------------------------
// Zip iterator over all per-rigid-body device arrays (position, velocity+mass,
// acceleration, orientation quaternion, angular velocity/acceleration in LRF).
// Tuple ordering must match the zipIterRigidD typedef.
zipIterRigidD FsiBodiesDataD::iterator() {
    return thrust::make_zip_iterator(
        thrust::make_tuple(posRigid_fsiBodies_D.begin(), velMassRigid_fsiBodies_D.begin(), accRigid_fsiBodies_D.begin(),
                           q_fsiBodies_D.begin(), omegaVelLRF_fsiBodies_D.begin(), omegaAccLRF_fsiBodies_D.begin()));
}
// Resize all device-side rigid-body state arrays to s bodies.
void FsiBodiesDataD::resize(size_t s) {
    posRigid_fsiBodies_D.resize(s);
    velMassRigid_fsiBodies_D.resize(s);
    accRigid_fsiBodies_D.resize(s);
    q_fsiBodies_D.resize(s);
    omegaVelLRF_fsiBodies_D.resize(s);
    omegaAccLRF_fsiBodies_D.resize(s);
}
// Resize all host-side shell-element node arrays (pos/vel/acc for the four
// nodes A..D of each 2D element) to s elements.
void FsiShellsDataH::resize(size_t s) {
    posFlex_fsiBodies_nA_H.resize(s);
    posFlex_fsiBodies_nB_H.resize(s);
    posFlex_fsiBodies_nC_H.resize(s);
    posFlex_fsiBodies_nD_H.resize(s);
    velFlex_fsiBodies_nA_H.resize(s);
    velFlex_fsiBodies_nB_H.resize(s);
    velFlex_fsiBodies_nC_H.resize(s);
    velFlex_fsiBodies_nD_H.resize(s);
    accFlex_fsiBodies_nA_H.resize(s);
    accFlex_fsiBodies_nB_H.resize(s);
    accFlex_fsiBodies_nC_H.resize(s);
    accFlex_fsiBodies_nD_H.resize(s);
}
// Device-side counterpart of FsiShellsDataH::resize.
void FsiShellsDataD::resize(size_t s) {
    posFlex_fsiBodies_nA_D.resize(s);
    posFlex_fsiBodies_nB_D.resize(s);
    posFlex_fsiBodies_nC_D.resize(s);
    posFlex_fsiBodies_nD_D.resize(s);
    velFlex_fsiBodies_nA_D.resize(s);
    velFlex_fsiBodies_nB_D.resize(s);
    velFlex_fsiBodies_nC_D.resize(s);
    velFlex_fsiBodies_nD_D.resize(s);
    accFlex_fsiBodies_nA_D.resize(s);
    accFlex_fsiBodies_nB_D.resize(s);
    accFlex_fsiBodies_nC_D.resize(s);
    accFlex_fsiBodies_nD_D.resize(s);
}
// Resize all host-side FEA mesh node arrays (position, velocity, acceleration,
// direction) to s nodes.
void FsiMeshDataH::resize(size_t s) {
    pos_fsi_fea_H.resize(s);
    vel_fsi_fea_H.resize(s);
    acc_fsi_fea_H.resize(s);
    dir_fsi_fea_H.resize(s);
}
// Device-side counterpart of FsiMeshDataH::resize.
void FsiMeshDataD::resize(size_t s) {
    pos_fsi_fea_D.resize(s);
    vel_fsi_fea_D.resize(s);
    acc_fsi_fea_D.resize(s);
    dir_fsi_fea_D.resize(s);
}
// Copy rigid-body state from host-side storage to this device-side storage.
// Assumes both containers were resized to the same number of bodies.
void FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) {
    thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(), posRigid_fsiBodies_D.begin());
    thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(),
                 velMassRigid_fsiBodies_D.begin());
    thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(), accRigid_fsiBodies_D.begin());
    thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(), q_fsiBodies_D.begin());
    thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(),
                 omegaVelLRF_fsiBodies_D.begin());
    thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(),
                 omegaAccLRF_fsiBodies_D.begin());
}
// Copy shell-element node state (pos/vel/acc for nodes A..D) from host-side
// storage to this device-side storage. Containers must have equal sizes.
void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) {
    thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(),
                 posFlex_fsiBodies_nA_D.begin());
    thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(),
                 posFlex_fsiBodies_nB_D.begin());
    thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(),
                 posFlex_fsiBodies_nC_D.begin());
    thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(),
                 posFlex_fsiBodies_nD_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(),
                 velFlex_fsiBodies_nA_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(),
                 velFlex_fsiBodies_nB_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(),
                 velFlex_fsiBodies_nC_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(),
                 velFlex_fsiBodies_nD_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(),
                 accFlex_fsiBodies_nA_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), other.accFlex_fsiBodies_nB_H.end(),
                 accFlex_fsiBodies_nB_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(),
                 accFlex_fsiBodies_nC_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(),
                 accFlex_fsiBodies_nD_D.begin());
}
// Copy FEA mesh node state from host-side storage to this device-side storage.
// Containers must have equal sizes.
void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) {
    thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin());
    thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin());
    thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin());
    thrust::copy(other.dir_fsi_fea_H.begin(), other.dir_fsi_fea_H.end(), dir_fsi_fea_D.begin());
}
// Element-wise device-to-device assignment of rigid-body state.
// Self-assignment is a no-op. Assumes both sides have equal sizes
// (thrust::copy does not resize the destination).
FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) {
    if (this == &other) {
        return *this;
    }
    thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(), posRigid_fsiBodies_D.begin());
    thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(),
                 velMassRigid_fsiBodies_D.begin());
    thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(), accRigid_fsiBodies_D.begin());
    thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(), q_fsiBodies_D.begin());
    thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(),
                 omegaVelLRF_fsiBodies_D.begin());
    thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(),
                 omegaAccLRF_fsiBodies_D.begin());
    return *this;
}
// Element-wise device-to-device assignment of shell-element node state
// (pos/vel/acc for nodes A..D). Self-assignment is a no-op. Assumes both
// sides have equal sizes (thrust::copy does not resize the destination).
FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) {
    if (this == &other) {
        return *this;
    }
    thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(),
                 posFlex_fsiBodies_nA_D.begin());
    thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(),
                 posFlex_fsiBodies_nB_D.begin());
    thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), other.posFlex_fsiBodies_nC_D.end(),
                 posFlex_fsiBodies_nC_D.begin());
    thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(),
                 posFlex_fsiBodies_nD_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(),
                 velFlex_fsiBodies_nA_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(),
                 velFlex_fsiBodies_nB_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(),
                 velFlex_fsiBodies_nC_D.begin());
    thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(),
                 velFlex_fsiBodies_nD_D.begin());
    // Bug fix: node-A accelerations were previously copied into the node-A
    // *position* array (posFlex_fsiBodies_nA_D), clobbering the positions just
    // copied above and leaving accFlex_fsiBodies_nA_D stale.
    thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(),
                 accFlex_fsiBodies_nA_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(),
                 accFlex_fsiBodies_nB_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(),
                 accFlex_fsiBodies_nC_D.begin());
    thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(),
                 accFlex_fsiBodies_nD_D.begin());
    return *this;
}
// Element-wise device-to-device assignment of FEA mesh node state.
// Self-assignment is a no-op. Assumes both sides have equal sizes.
FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) {
    if (this == &other) {
        return *this;
    }
    thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin());
    thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin());
    thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin());
    thrust::copy(other.dir_fsi_fea_D.begin(), other.dir_fsi_fea_D.end(), dir_fsi_fea_D.begin());
    return *this;
}
//---------------------------------------------------------------------------------------
// Zip iterator over all per-rigid-body host arrays (mirror of
// FsiBodiesDataD::iterator; ordering must match zipIterRigidH).
zipIterRigidH FsiBodiesDataH::iterator() {
    return thrust::make_zip_iterator(
        thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(),
                           q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), omegaAccLRF_fsiBodies_H.begin()))
}
// Resize all host-side rigid-body state arrays to s bodies.
void FsiBodiesDataH::resize(size_t s) {
    posRigid_fsiBodies_H.resize(s);
    velMassRigid_fsiBodies_H.resize(s);
    accRigid_fsiBodies_H.resize(s);
    q_fsiBodies_H.resize(s);
    omegaVelLRF_fsiBodies_H.resize(s);
    omegaAccLRF_fsiBodies_H.resize(s);
}
//---------------------------------------------------------------------------------------
// Resize proximity-search arrays (grid hash, marker index, sort map) to s markers.
void ProximityDataD::resize(size_t s) {
    gridMarkerHashD.resize(s);
    gridMarkerIndexD.resize(s);
    mapOriginalToSorted.resize(s);
}
//---------------------------------------------------------------------------------------
// Allocate host-side storage for s rigid bodies mirrored from the Chrono system.
ChronoBodiesDataH::ChronoBodiesDataH(size_t s) {
    resize(s);
}
// Allocate host-side storage for s shell elements mirrored from the Chrono system.
ChronoShellsDataH::ChronoShellsDataH(size_t s) {
    resize(s);
}
// Allocate host-side storage for s mesh nodes mirrored from the Chrono system.
ChronoMeshDataH::ChronoMeshDataH(size_t s) {
    resize(s);
}
// Zip iterator over all per-body arrays mirrored from the Chrono system
// (ordering must match zipIterChronoBodiesH).
zipIterChronoBodiesH ChronoBodiesDataH::iterator() {
    return thrust::make_zip_iterator(thrust::make_tuple(pos_ChSystemH.begin(), vel_ChSystemH.begin(),
                                                        acc_ChSystemH.begin(), quat_ChSystemH.begin(),
                                                        omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin()));
}
// Resize all mirrored rigid-body arrays to s bodies.
void ChronoBodiesDataH::resize(size_t s) {
    pos_ChSystemH.resize(s);
    vel_ChSystemH.resize(s);
    acc_ChSystemH.resize(s);
    quat_ChSystemH.resize(s);
    omegaVelGRF_ChSystemH.resize(s);
    omegaAccGRF_ChSystemH.resize(s);
}
// Resize all mirrored shell-element node arrays (nodes A..D) to s elements.
void ChronoShellsDataH::resize(size_t s) {
    posFlex_ChSystemH_nA_H.resize(s);
    posFlex_ChSystemH_nB_H.resize(s);
    posFlex_ChSystemH_nC_H.resize(s);
    posFlex_ChSystemH_nD_H.resize(s);
    velFlex_ChSystemH_nA_H.resize(s);
    velFlex_ChSystemH_nB_H.resize(s);
    velFlex_ChSystemH_nC_H.resize(s);
    velFlex_ChSystemH_nD_H.resize(s);
    accFlex_ChSystemH_nA_H.resize(s);
    accFlex_ChSystemH_nB_H.resize(s);
    accFlex_ChSystemH_nC_H.resize(s);
    accFlex_ChSystemH_nD_H.resize(s);
}
// Resize all mirrored mesh node arrays to s nodes.
void ChronoMeshDataH::resize(size_t s) {
    posFlex_ChSystemH_H.resize(s);
    velFlex_ChSystemH_H.resize(s);
    accFlex_ChSystemH_H.resize(s);
    dirFlex_ChSystemH_H.resize(s);
}
//---------------------------------------------------------------------------------------
// Construct the FSI system implementation: zero all object counters and
// allocate the (initially empty) host/device data containers.
ChSystemFsi_impl::ChSystemFsi_impl(std::shared_ptr<SimParams> params) : paramsH(params) {
    numObjects = chrono_types::make_shared<ChCounters>();
    InitNumObjects();
    sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>();
    sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>();
    sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>();
    sphMarkersH = chrono_types::make_shared<SphMarkerDataH>();
    fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>();
    fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>();
    fsiBodiesH = chrono_types::make_shared<FsiBodiesDataH>();
    fsiMeshD = chrono_types::make_shared<FsiMeshDataD>();
    fsiMeshH = chrono_types::make_shared<FsiMeshDataH>();
    fsiGeneralData = chrono_types::make_shared<FsiGeneralData>();
    markersProximityD = chrono_types::make_shared<ProximityDataD>();
}
// All members are shared_ptr / thrust vectors, so default cleanup suffices.
ChSystemFsi_impl::~ChSystemFsi_impl() {}
// Append a single SPH particle to the host-side marker arrays:
// pos        - position (xyz) + radius (w)
// rhoPresMu  - density, pressure, viscosity-related value, marker type (w)
// vel        - velocity (mass handling per velMas convention)
// tauXxYyZz / tauXyXzYz - normal and shear stress components
void ChSystemFsi_impl::AddSPHParticle(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) {
    sphMarkersH->posRadH.push_back(pos);
    sphMarkersH->velMasH.push_back(vel);
    sphMarkersH->rhoPresMuH.push_back(rhoPresMu);
    sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz);
    sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz);
}
// NOTE(review): this copies rhoPresMuH into a temporary and immediately clears
// it -- there is no observable effect; it appears to be a leftover placeholder.
// Confirm before removing.
void ChSystemFsi_impl::ArrangeDataManager() {
    thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
    dummyRhoPresMuH.clear();
}
// Reset every marker/object counter to zero. Called from the constructor and
// at the start of CalcNumObjects before the counters are recomputed.
void ChSystemFsi_impl::InitNumObjects() {
    numObjects->numRigidBodies = 0;      // Number of rigid bodies
    numObjects->numFlexBodies1D = 0;     // Number of 1D Flexible bodies
    numObjects->numFlexBodies2D = 0;     // Number of 2D Flexible bodies
    numObjects->numFlexNodes = 0;        // Number of FE nodes
    numObjects->numGhostMarkers = 0;     // Number of ghost particles
    numObjects->numHelperMarkers = 0;    // Number of helper particles
    numObjects->numFluidMarkers = 0;     // Number of fluid SPH particles
    numObjects->numBoundaryMarkers = 0;  // Number of boundary SPH particles
    numObjects->startRigidMarkers = 0;   // Start index of the rigid SPH particles
    numObjects->startFlexMarkers = 0;    // Start index of the flexible SPH particles
    numObjects->numRigidMarkers = 0;     // Number of rigid SPH particles
    numObjects->numFlexMarkers = 0;      // Number of flexible SPH particles
    numObjects->numAllMarkers = 0;       // Total number of SPH particles
}
// Recompute all marker/object counters by scanning referenceArray.
// Each referenceArray entry is an int4 (start index, end index, component
// type code, phase type); the type code in .z selects which counter to bump:
//   -3 helper, -2 ghost, -1 fluid, 0 boundary, 1 rigid, 2 cable (1D), 3 shell (2D).
void ChSystemFsi_impl::CalcNumObjects() {
    InitNumObjects();
    size_t rSize = fsiGeneralData->referenceArray.size();
    for (size_t i = 0; i < rSize; i++) {
        int4 rComp4 = fsiGeneralData->referenceArray[i];
        int numMarkers = rComp4.y - rComp4.x;  // markers in this component (end - start)
        switch (rComp4.z) {
            case -3:
                numObjects->numHelperMarkers += numMarkers;
                break;
            case -2:
                numObjects->numGhostMarkers += numMarkers;
                break;
            case -1:
                numObjects->numFluidMarkers += numMarkers;
                break;
            case 0:
                numObjects->numBoundaryMarkers += numMarkers;
                break;
            case 1:
                numObjects->numRigidMarkers += numMarkers;
                numObjects->numRigidBodies++;
                break;
            case 2:
                numObjects->numFlexMarkers += numMarkers;
                numObjects->numFlexBodies1D++;
                break;
            case 3:
                numObjects->numFlexMarkers += numMarkers;
                numObjects->numFlexBodies2D++;
                break;
            default:
                std::cerr << "ERROR (CalcNumObjects): particle type not defined." << std::endl;
                throw std::runtime_error("Particle type not defined.");
                break;
        }
    }
    // Helper and ghost markers are lumped into the fluid count; start indices
    // assume the layout fluid | boundary | rigid | flex.
    numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers;
    numObjects->numAllMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers +
                                numObjects->numRigidMarkers + numObjects->numFlexMarkers;
    numObjects->startRigidMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers;
    numObjects->startFlexMarkers =
        numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers;
}
// Build referenceArray (one int4 per contiguous run of equal-type markers:
// start index, end index, component type code, phase type) and
// referenceArray_FEA (subset for 1D/2D flexible components) from the
// host-side marker types stored in rhoPresMuH (.w component).
void ChSystemFsi_impl::ConstructReferenceArray() {
    auto numAllMarkers = sphMarkersH->rhoPresMuH.size();
    // Per-marker counters, collapsed by reduce_by_key into the number of
    // markers in each contiguous component.
    thrust::host_vector<int> numComponentMarkers(numAllMarkers);
    thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1);
    // Work on a copy so the original marker properties are left untouched.
    // (The copy constructor already duplicates the data; the element-wise
    // thrust::copy that used to follow here was redundant and was removed.)
    thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
    size_t numberOfComponents =
        (thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(),
                               dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual()))
            .first -
        dummyRhoPresMuH.begin();
    dummyRhoPresMuH.resize(numberOfComponents);
    numComponentMarkers.resize(numberOfComponents);
    fsiGeneralData->referenceArray.clear();
    fsiGeneralData->referenceArray_FEA.clear();
    // Loop through all components loading referenceArray and referenceArray_FEA
    int start_index = 0;
    for (size_t i = 0; i < numberOfComponents; i++) {
        // +.1 guards against the float type code sitting just below an integer.
        int compType = (int)std::floor(dummyRhoPresMuH[i].w + .1);
        int phaseType = -1;
        if (compType == -3) {
            phaseType = -1;  // For helper
        } else if (compType == -2) {
            phaseType = -1;  // For ghost
        } else if (compType == -1) {
            phaseType = -1;  // For fluid/granular
        } else if (compType == 0) {
            phaseType = 0;  // For boundary
        } else if (compType == 1) {
            phaseType = 1;  // For rigid
        } else if (compType == 2) {
            phaseType = 1;  // For 1D cable elements
        } else if (compType == 3) {
            phaseType = 1;  // For 2D shell elements
        } else {
            phaseType = 1;
        }
        auto new_entry = mI4(start_index, start_index + numComponentMarkers[i], compType, phaseType);
        start_index += numComponentMarkers[i];
        fsiGeneralData->referenceArray.push_back(new_entry);
        if (compType == 2 || compType == 3)
            fsiGeneralData->referenceArray_FEA.push_back(new_entry);
    }
    dummyRhoPresMuH.clear();
    numComponentMarkers.clear();
}
//--------------------------------------------------------------------------------------------------------------------------------
// Finalize problem setup: build the reference arrays, recompute counters,
// record the number of interface objects, size every device/host array to the
// final marker counts, and push the host-side marker data to the device
// (both sphMarkersD1 and sphMarkersD2 start from the same state).
void ChSystemFsi_impl::ResizeData(size_t numRigidBodies,
                                  size_t numFlexBodies1D,
                                  size_t numFlexBodies2D,
                                  size_t numFlexNodes) {
    ConstructReferenceArray();
    CalcNumObjects();
    if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
        std::cerr << "ERROR (ResizeData): mismatch in total number of markers." << std::endl;
        throw std::runtime_error("Mismatch in total number of markers.");
    }
    // Set number of interface objects
    numObjects->numRigidBodies = numRigidBodies;
    numObjects->numFlexBodies1D = numFlexBodies1D;
    numObjects->numFlexBodies2D = numFlexBodies2D;
    numObjects->numFlexNodes = numFlexNodes;
    // Per-marker storage (both device buffers, sorted buffer, and host mirror).
    sphMarkersD1->resize(numObjects->numAllMarkers);
    sphMarkersD2->resize(numObjects->numAllMarkers);
    sortedSphMarkersD->resize(numObjects->numAllMarkers);
    sphMarkersH->resize(numObjects->numAllMarkers);
    markersProximityD->resize(numObjects->numAllMarkers);
    fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers);
    fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers);
    fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers);
    fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers);
    fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers);
    fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20));
    fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20));
    fsiGeneralData->activityIdentifierD.resize(numObjects->numAllMarkers, 1);
    fsiGeneralData->extendedActivityIdD.resize(numObjects->numAllMarkers, 1);
    fsiGeneralData->freeSurfaceIdD.resize(numObjects->numAllMarkers, 0);
    // Upload host marker data to device buffer 1, then mirror into buffer 2.
    thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin());
    thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin());
    thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin());
    thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin());
    thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin());
    thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin());
    thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), sphMarkersD2->velMasD.begin());
    thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin());
    thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin());
    thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin());
    // Rigid-body storage.
    fsiBodiesD1->resize(numObjects->numRigidBodies);
    fsiBodiesD2->resize(numObjects->numRigidBodies);
    fsiBodiesH->resize(numObjects->numRigidBodies);
    fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies);
    fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies);
    fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigidMarkers);
    fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigidMarkers);
    // Flexible-body storage and element connectivity (uploaded from host).
    fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlexMarkers);
    fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlexMarkers);
    fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlexMarkers);
    fsiGeneralData->CableElementsNodesD.resize(fsiGeneralData->CableElementsNodesH.size());
    fsiGeneralData->ShellElementsNodesD.resize(fsiGeneralData->ShellElementsNodesH.size());
    thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(),
                 fsiGeneralData->CableElementsNodesD.begin());
    thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(),
                 fsiGeneralData->ShellElementsNodesD.begin());
    fsiMeshD->resize(numObjects->numFlexNodes);
    fsiMeshH->resize(numObjects->numFlexNodes);
    fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes);
}
//--------------------------------------------------------------------------------------------------------------------------------
// Unary functor scaling a Real4 by a constant factor; used to convert
// particle accelerations into forces (factor = marker mass).
struct scale_functor {
    scale_functor(Real a) : m_a(a) {}
    // const-qualified and const-ref parameter: the functor does not modify
    // its input or its own state (required idiom for thrust transforms).
    __host__ __device__ Real4 operator()(const Real4& x) const { return m_a * x; }
    const Real m_a;
};
// Return a copy of the current accelerations of the fluid/granular SPH
// particles, i.e. the first numFluidMarkers entries of derivVelRhoD.
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleAccelerations() {
    const auto num_fluid = numObjects->numFluidMarkers;
    thrust::device_vector<Real4> accelerations(num_fluid);
    thrust::copy_n(fsiGeneralData->derivVelRhoD.begin(), num_fluid, accelerations.begin());
    return accelerations;
}
// Return the forces acting on the fluid/granular SPH particles, obtained by
// scaling the per-particle accelerations with the constant marker mass.
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces() {
    auto forces = GetParticleAccelerations();
    thrust::transform(forces.begin(), forces.end(), forces.begin(), scale_functor(paramsH->markerMass));
    return forces;
}
//--------------------------------------------------------------------------------------------------------------------------------
// Device predicate: true when an SPH marker position (xyz of a Real4) lies
// inside an oriented bounding box described by its center (pos), half-sizes
// (hsize), and the three box axis directions (ax, ay, az) in the global frame.
// The fields must be filled in by the caller before use (see FindParticlesInBox).
struct in_box {
    in_box() {}
    __device__ bool operator()(const Real4 v) {
        // Convert location in box frame
        auto d = mR3(v) - pos;
        auto w = mR3(                              //
            ax.x * d.x + ax.y * d.y + ax.z * d.z,  //
            ay.x * d.x + ay.y * d.y + ay.z * d.z,  //
            az.x * d.x + az.y * d.y + az.z * d.z   //
        );
        // Check w between all box limits
        return (w.x >= -hsize.x && w.x <= +hsize.x) && (w.y >= -hsize.y && w.y <= +hsize.y) &&
               (w.z >= -hsize.z && w.z <= +hsize.z);
    }
    Real3 hsize;  // box half-dimensions along its own axes
    Real3 pos;    // box center in the global frame
    Real3 ax;     // box x-axis direction (global frame)
    Real3 ay;     // box y-axis direction (global frame)
    Real3 az;     // box z-axis direction (global frame)
};
// Return the indices of SPH particles whose positions lie inside the oriented
// bounding box (center pos, half-sizes hsize, axes ax/ay/az). Helper, ghost,
// and BCE markers are excluded by restricting the search to the fluid segment
// of referenceArray.
thrust::device_vector<int> ChSystemFsi_impl::FindParticlesInBox(const Real3& hsize,
                                                                const Real3& pos,
                                                                const Real3& ax,
                                                                const Real3& ay,
                                                                const Real3& az) {
    // Extract indices of SPH particles contained in the OBB
    auto& ref = fsiGeneralData->referenceArray;
    auto& pos_D = sphMarkersD2->posRadD;
    // Find start and end locations for SPH particles (exclude ghost and BCE markers)
    // NOTE(review): indices returned below are relative to sph_start being the
    // first stencil element passed to copy_if -- verify callers expect that.
    int haveHelper = (ref[0].z == -3) ? 1 : 0;
    int haveGhost = (ref[0].z == -2 || ref[1].z == -2) ? 1 : 0;
    auto sph_start = ref[haveHelper + haveGhost].x;
    auto sph_end = ref[haveHelper + haveGhost].y;
    auto num_sph = sph_end - sph_start;
    // Preallocate output vector of indices
    thrust::device_vector<int> indices_D(num_sph);
    // Extract indices of SPH particles inside OBB
    thrust::counting_iterator<int> first(0);
    thrust::counting_iterator<int> last(num_sph);
    in_box predicate;
    predicate.hsize = hsize;
    predicate.pos = pos;
    predicate.ax = ax;
    predicate.ay = ay;
    predicate.az = az;
    auto end = thrust::copy_if(thrust::device,     // execution policy
                               first, last,        // range of all particle indices
                               pos_D.begin(),      // stencil vector
                               indices_D.begin(),  // beginning of destination
                               predicate           // predicate for stencil elements
    );
    // Trim the output vector of indices
    size_t num_active = (size_t)(end - indices_D.begin());
    indices_D.resize(num_active);
    return indices_D;
}
// Gather positions of the particles with the specified indices.
// Assumes every index is a valid marker index into posRadD (TODO confirm callers).
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticlePositions(const thrust::device_vector<int>& indices) {
    const auto& allpos = sphMarkersD2->posRadD;
    // thrust::gather writes exactly one element per index, so size the output
    // to the request instead of over-allocating to the full particle count.
    thrust::device_vector<Real4> pos(indices.size());
    auto end = thrust::gather(thrust::device,                  // execution policy
                              indices.begin(), indices.end(),  // range of gather locations
                              allpos.begin(),                  // beginning of source
                              pos.begin()                      // beginning of destination
    );
    // Sanity check: the gather must have filled the whole output vector.
    size_t num_active = (size_t)(end - pos.begin());
    assert(num_active == indices.size());
    (void)num_active;
    return pos;
}
// Gather velocities of the particles with the specified indices.
// Assumes every index is a valid marker index into velMasD (TODO confirm callers).
thrust::device_vector<Real3> ChSystemFsi_impl::GetParticleVelocities(const thrust::device_vector<int>& indices) {
    // Reference the source array directly; taking it by value (plain auto)
    // would deep-copy the entire device vector just to read a few entries.
    const auto& allvel = sphMarkersD2->velMasD;
    // One output entry per requested index.
    thrust::device_vector<Real3> vel(indices.size());
    auto end = thrust::gather(thrust::device,                  // execution policy
                              indices.begin(), indices.end(),  // range of gather locations
                              allvel.begin(),                  // beginning of source
                              vel.begin()                      // beginning of destination
    );
    // Sanity check: the gather must have filled the whole output vector.
    size_t num_active = (size_t)(end - vel.begin());
    assert(num_active == indices.size());
    (void)num_active;
    return vel;
}
// Gather accelerations of the particles with the specified indices.
// NOTE(review): GetParticleAccelerations() returns accelerations of the fluid
// markers only, so the indices are presumably fluid-marker indices -- confirm.
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleAccelerations(const thrust::device_vector<int>& indices) {
    // Keep the temporary alive by value: GetParticleAccelerations() returns a
    // freshly allocated device vector.
    auto allacc = GetParticleAccelerations();
    // One output entry per requested index (thrust::gather writes exactly
    // indices.size() elements).
    thrust::device_vector<Real4> acc(indices.size());
    auto end = thrust::gather(thrust::device,                  // execution policy
                              indices.begin(), indices.end(),  // range of gather locations
                              allacc.begin(),                  // beginning of source
                              acc.begin()                      // beginning of destination
    );
    // Sanity check: the gather must have filled the whole output vector.
    size_t num_active = (size_t)(end - acc.begin());
    assert(num_active == indices.size());
    (void)num_active;
    return acc;
}
// Gather forces acting on the particles with the specified indices
// (accelerations scaled by the constant marker mass).
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces(const thrust::device_vector<int>& indices) {
    auto allforces = GetParticleForces();
    // One output entry per requested index.
    thrust::device_vector<Real4> forces(indices.size());
    auto end = thrust::gather(thrust::device,                  // execution policy
                              indices.begin(), indices.end(),  // range of gather locations
                              allforces.begin(),               // beginning of source
                              forces.begin()                   // beginning of destination
    );
    // Sanity check: the gather must have filled the whole output vector.
    size_t num_active = (size_t)(end - forces.begin());
    assert(num_active == indices.size());
    (void)num_active;
    return forces;
}
} // end namespace fsi
} // end namespace chrono
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.