hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
efbbe03fa5d23358aebafff2e30a45daf782d755.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel4(dtype *g_idata, dtype *g_odata, unsigned int n) { // int size = MAX_THREADS/2; __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x * 2 + threadIdx.x; // Global Thread ID // unsigned int half = blockDim.x/2; // Cuts down threads used by half if(i + blockDim.x < n) { scratch[threadIdx.x] = g_idata[i] + g_idata[i + blockDim.x]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); // One less stride unsigned int s; for(s = blockDim.x / 2; s > 32; s = s >> 1) { // Modify Here if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // 
----------------- __syncthreads (); } if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); if(threadIdx.x == 0) { g_odata[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_4, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 4; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype), hipMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(blocks, 1, 1); dim3 tb(threads, 1, 1); /* warm up */ hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); hipDeviceSynchronize (); stopwatch_start (timer); /* 
execute kernel */ hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(blocks, 1, 1); dim3 tb(threads, 1, 1); hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s); s = (s + threads * 2 - 1) / (threads * 2); } hipDeviceSynchronize (); t_kernel_4 = stopwatch_stop (timer); fprintf (stdout, "Time to execute unrolled GPU reduction kernel: %Lg secs\n", t_kernel_4); double bw = (N * sizeof(dtype)) / (t_kernel_4 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype), hipMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
efbbe03fa5d23358aebafff2e30a45daf782d755.cu
#include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel4(dtype *g_idata, dtype *g_odata, unsigned int n) { // int size = MAX_THREADS/2; __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x * 2 + threadIdx.x; // Global Thread ID // unsigned int half = blockDim.x/2; // Cuts down threads used by half if(i + blockDim.x < n) { scratch[threadIdx.x] = g_idata[i] + g_idata[i + blockDim.x]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); // One less stride unsigned int s; for(s = blockDim.x / 2; s > 32; s = s >> 1) { // Modify Here if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // ----------------- __syncthreads (); } if (threadIdx.x < s) { scratch[threadIdx.x] += 
scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); s = s >> 1; if (threadIdx.x < s) { scratch[threadIdx.x] += scratch[s + threadIdx.x]; } // __syncthreads (); if(threadIdx.x == 0) { g_odata[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_4, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 4; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype), cudaMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(blocks, 1, 1); dim3 tb(threads, 1, 1); /* warm up */ kernel4 <<<gb, tb>>> (d_idata, d_odata, N); cudaThreadSynchronize (); stopwatch_start (timer); /* execute kernel */ kernel4 <<<gb, tb>>> (d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; 
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(blocks, 1, 1); dim3 tb(threads, 1, 1); kernel4 <<<gb, tb>>> (d_odata, d_odata, s); s = (s + threads * 2 - 1) / (threads * 2); } cudaThreadSynchronize (); t_kernel_4 = stopwatch_stop (timer); fprintf (stdout, "Time to execute unrolled GPU reduction kernel: %Lg secs\n", t_kernel_4); double bw = (N * sizeof(dtype)) / (t_kernel_4 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype), cudaMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
c32efad7a435d3107d15b55180923ff7e9476cf1.hip
// !!! This is a file automatically generated by hipify!!! // This file is auto-generated. See "generate_kernels.sh" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h> INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::half_t, true); INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::half_t, true); INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::half_t, true); INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::half_t, true);
c32efad7a435d3107d15b55180923ff7e9476cf1.cu
// This file is auto-generated. See "generate_kernels.sh" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h> INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::half_t, true); INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::half_t, true); INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::half_t, true); INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::half_t, true);
a1716a64f47fa73efc1f5f388f21143b99f92f61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // You can use any other block size you wish. 
#define BLOCK_SIZE 512 #define BLOCK_DUB 1024 //Works for power of 2 elements #define DEFAULT_NUM_ELEMENTS 1024 #define MAX_RAND 2 typedef float REAL; __global__ void prescan(REAL *odata, REAL *idata, int num) { volatile __shared__ REAL temp[BLOCK_DUB]; //Set up some convenient variables int ti = threadIdx.x; int bid = blockIdx.x + blockIdx.y*gridDim.x; int index = bid*blockDim.x + ti; int ofs = 1; int mult = DEFAULT_NUM_ELEMENTS/num; int top = mult*(2*(index+1))-1; if (top < DEFAULT_NUM_ELEMENTS) { temp[2*ti] = idata[2*index*mult+mult-1]; temp[2*ti+1] = idata[top]; } else { temp[2*ti+1] = 0; if (top == DEFAULT_NUM_ELEMENTS) { temp[2*ti] = idata[2*index*mult+mult-1]; } else { temp[2*ti] = 0; } } for (int i = BLOCK_SIZE; i>0; i>>=1) { __syncthreads(); if (ti<i) { int ai = ofs*(2*ti+1)-1; int bi = ofs*(2*ti+2)-1; temp[bi] += temp[ai]; } ofs <<= 1; } __syncthreads(); if (top < DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = temp[2*ti]; idata[top] = temp[2*ti+1]; } else { if (top == DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = temp[2*ti]; } } } __global__ void downsweep(REAL *odata, REAL *idata, int num, int last) { volatile __shared__ REAL tempd[BLOCK_DUB]; //Set up some convenient variables int ti = threadIdx.x; int bid = blockIdx.x + blockIdx.y*gridDim.x; int index = bid*blockDim.x + ti; int ofs = BLOCK_DUB; int mult = DEFAULT_NUM_ELEMENTS/num; int top = mult*(2*(index+1))-1; if (top < DEFAULT_NUM_ELEMENTS) { tempd[2*ti] = idata[2*index*mult+mult-1]; tempd[2*ti+1] = idata[top]; } else { tempd[2*ti+1] = 0; if (top == DEFAULT_NUM_ELEMENTS) { tempd[2*ti] = idata[2*index*mult+mult-1]; } else { tempd[2*ti] = 0; } } if (last == 1) { tempd[num-1] = 0; } for (int j = 1; j<BLOCK_DUB; j<<=1) //fix { ofs >>= 1; __syncthreads(); if (ti < j) { int ai = ofs*(2*ti+1)-1; int bi = ofs*(2*ti+2)-1; REAL temp2 = tempd[ai]; tempd[ai] = tempd[bi]; tempd[bi] += temp2; } } __syncthreads(); if (last == 1) { if (top < DEFAULT_NUM_ELEMENTS) { odata[2*index*mult+mult-1] = 
tempd[2*ti]; odata[top] = tempd[2*ti+1]; } else { if (top == DEFAULT_NUM_ELEMENTS) { odata[2*index*mult+mult-1] = tempd[2*ti]; } } } else { if (top < DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = tempd[2*ti]; idata[top] = tempd[2*ti+1]; } else { if (top == DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = tempd[2*ti]; } } } } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. void prescanArray(REAL *outArray, REAL *inArray, int numElements) { //Use kernel to compute the reduction int blocksx, blocksy, blocks; int threads = BLOCK_SIZE; int nestElements = numElements; int lastElements; blocksx = (nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = (blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid(blocksx,blocksy); while(nestElements > 1) { // Recursive implementation to compute the reduction hipLaunchKernelGGL(( prescan) , dim3(dimGrid),dim3(threads), 0, 0, outArray, inArray, nestElements); lastElements = nestElements; nestElements = blocks; blocksx = (nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = (blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid(blocksx, blocksy); } //fix nestElements = lastElements; blocksx = (nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = (blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid2(blocksx,blocksy); while(nestElements <= DEFAULT_NUM_ELEMENTS) { //printf("%d \n",nestElements); // Recursive implementation to compute the downsweep if (nestElements == DEFAULT_NUM_ELEMENTS) { hipLaunchKernelGGL(( downsweep) , dim3(dimGrid2),dim3(threads), 0, 0, outArray, inArray, nestElements, 1); nestElements = DEFAULT_NUM_ELEMENTS+1; //fix } else { hipLaunchKernelGGL(( downsweep) , dim3(dimGrid2),dim3(threads), 0, 0, outArray, inArray, nestElements, 0); nestElements = blocks*nestElements; //fix } blocksx = 
(nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = (blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid2(blocksx, blocksy); } //downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements); } // **===-----------------------------------------------------------===** //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" unsigned int compare( const REAL* reference, const REAL* data, const unsigned int len); extern "C" void computeGold( REAL* reference, REAL* idata, const unsigned int len); unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! 
Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { float device_time; float host_time; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof( REAL) * num_elements; REAL* h_data = (REAL*) malloc( mem_size); switch(argc-1) { case 0: num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof( REAL) * num_elements; h_data = (REAL*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; default: num_elements = atoi(argv[1]); // allocate host memory to store the input data mem_size = sizeof( REAL) * num_elements; h_data = (REAL*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; } hipEvent_t time_start; hipEvent_t time_end; hipEventCreate(&time_start); hipEventCreate(&time_end); // compute reference solution REAL* reference = (REAL*) malloc( mem_size); // cutStartTimer(timer); hipEventRecord(time_start, 0); computeGold( reference, h_data, num_elements); hipEventRecord(time_end, 0); hipEventSynchronize(time_end); hipEventElapsedTime(&host_time, time_start, time_end); // cutStopTimer(timer); printf("\n\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); printf("Host CPU Processing time: %f (ms)\n", host_time); // allocate device memory input and output arrays REAL* d_idata = NULL; REAL* d_odata = NULL; hipMalloc( (void**) &d_idata, mem_size); hipMalloc( (void**) &d_odata, mem_size); // copy host memory to device input array hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice); // initialize all the other device arrays to be 
safe hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice); // **===-------- Allocate data structure here -----------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement //prescanArray(d_odata, d_idata, 16); // Run the prescan // CUT_SAFE_CALL(cutCreateTimer(&timer)); // cutStartTimer(timer); hipEventRecord(time_start, 0); // **===-------- Modify the body of this function -----------===** prescanArray(d_odata, d_idata, num_elements); // **===-----------------------------------------------------------===** hipDeviceSynchronize(); hipEventRecord(time_end, 0); hipEventSynchronize(time_end); hipEventElapsedTime(&device_time, time_start, time_end); hipEventDestroy(time_start); hipEventDestroy(time_end); // cutStopTimer(timer); printf("CUDA Processing time: %g (ms)\n", device_time); // device_time = cutGetTimerValue(timer); // printf("Speedup: %fX\n", host_time/device_time); // **===-------- Deallocate data structure here -----------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // copy result from device to host hipMemcpy( h_data, d_odata, sizeof(REAL) * num_elements, hipMemcpyDeviceToHost); // Check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7); printf( "Test %s\n", (0 == result_regtest) ? 
"FAILED" : "PASSED"); // cleanup memory free( h_data); free( reference); hipFree( d_odata); hipFree( d_idata); } unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err) { int i; int diff_count = 0; for (i = 0; i < num_elements; i++) { REAL diff = fabs(reference[i] - h_data[i]); REAL denominator = 1.f; if (denominator < fabs(reference[i])) { denominator = fabs(reference[i]); } if (!(diff / denominator < err)) { diff_count ++; } } if (diff_count > 0) { printf("Number of difference: %d\n", diff_count); return 0; } else { return 1; } }
a1716a64f47fa73efc1f5f388f21143b99f92f61.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // You can use any other block size you wish. 
#define BLOCK_SIZE 512 #define BLOCK_DUB 1024 //Works for power of 2 elements #define DEFAULT_NUM_ELEMENTS 1024 #define MAX_RAND 2 typedef float REAL; __global__ void prescan(REAL *odata, REAL *idata, int num) { volatile __shared__ REAL temp[BLOCK_DUB]; //Set up some convenient variables int ti = threadIdx.x; int bid = blockIdx.x + blockIdx.y*gridDim.x; int index = bid*blockDim.x + ti; int ofs = 1; int mult = DEFAULT_NUM_ELEMENTS/num; int top = mult*(2*(index+1))-1; if (top < DEFAULT_NUM_ELEMENTS) { temp[2*ti] = idata[2*index*mult+mult-1]; temp[2*ti+1] = idata[top]; } else { temp[2*ti+1] = 0; if (top == DEFAULT_NUM_ELEMENTS) { temp[2*ti] = idata[2*index*mult+mult-1]; } else { temp[2*ti] = 0; } } for (int i = BLOCK_SIZE; i>0; i>>=1) { __syncthreads(); if (ti<i) { int ai = ofs*(2*ti+1)-1; int bi = ofs*(2*ti+2)-1; temp[bi] += temp[ai]; } ofs <<= 1; } __syncthreads(); if (top < DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = temp[2*ti]; idata[top] = temp[2*ti+1]; } else { if (top == DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = temp[2*ti]; } } } __global__ void downsweep(REAL *odata, REAL *idata, int num, int last) { volatile __shared__ REAL tempd[BLOCK_DUB]; //Set up some convenient variables int ti = threadIdx.x; int bid = blockIdx.x + blockIdx.y*gridDim.x; int index = bid*blockDim.x + ti; int ofs = BLOCK_DUB; int mult = DEFAULT_NUM_ELEMENTS/num; int top = mult*(2*(index+1))-1; if (top < DEFAULT_NUM_ELEMENTS) { tempd[2*ti] = idata[2*index*mult+mult-1]; tempd[2*ti+1] = idata[top]; } else { tempd[2*ti+1] = 0; if (top == DEFAULT_NUM_ELEMENTS) { tempd[2*ti] = idata[2*index*mult+mult-1]; } else { tempd[2*ti] = 0; } } if (last == 1) { tempd[num-1] = 0; } for (int j = 1; j<BLOCK_DUB; j<<=1) //fix { ofs >>= 1; __syncthreads(); if (ti < j) { int ai = ofs*(2*ti+1)-1; int bi = ofs*(2*ti+2)-1; REAL temp2 = tempd[ai]; tempd[ai] = tempd[bi]; tempd[bi] += temp2; } } __syncthreads(); if (last == 1) { if (top < DEFAULT_NUM_ELEMENTS) { odata[2*index*mult+mult-1] = 
tempd[2*ti]; odata[top] = tempd[2*ti+1]; } else { if (top == DEFAULT_NUM_ELEMENTS) { odata[2*index*mult+mult-1] = tempd[2*ti]; } } } else { if (top < DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = tempd[2*ti]; idata[top] = tempd[2*ti+1]; } else { if (top == DEFAULT_NUM_ELEMENTS) { idata[2*index*mult+mult-1] = tempd[2*ti]; } } } } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. void prescanArray(REAL *outArray, REAL *inArray, int numElements) { //Use kernel to compute the reduction int blocksx, blocksy, blocks; int threads = BLOCK_SIZE; int nestElements = numElements; int lastElements; blocksx = (nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = (blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid(blocksx,blocksy); while(nestElements > 1) { // Recursive implementation to compute the reduction prescan <<<dimGrid,threads>>> (outArray, inArray, nestElements); lastElements = nestElements; nestElements = blocks; blocksx = (nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = (blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid(blocksx, blocksy); } //fix nestElements = lastElements; blocksx = (nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = (blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid2(blocksx,blocksy); while(nestElements <= DEFAULT_NUM_ELEMENTS) { //printf("%d \n",nestElements); // Recursive implementation to compute the downsweep if (nestElements == DEFAULT_NUM_ELEMENTS) { downsweep <<<dimGrid2,threads>>> (outArray, inArray, nestElements, 1); nestElements = DEFAULT_NUM_ELEMENTS+1; //fix } else { downsweep <<<dimGrid2,threads>>> (outArray, inArray, nestElements, 0); nestElements = blocks*nestElements; //fix } blocksx = (nestElements+BLOCK_DUB-1)/(threads*2); blocks = blocksx; blocksy = 1; if (blocksx > 65535) { blocksy = 
(blocksx+65534)/65535; blocksx = 65535; } dim3 dimGrid2(blocksx, blocksy); } //downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements); } // **===-----------------------------------------------------------===** //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" unsigned int compare( const REAL* reference, const REAL* data, const unsigned int len); extern "C" void computeGold( REAL* reference, REAL* idata, const unsigned int len); unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { float device_time; float host_time; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof( REAL) * num_elements; REAL* h_data = (REAL*) malloc( mem_size); switch(argc-1) { case 0: num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof( REAL) * num_elements; h_data = (REAL*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; default: num_elements = atoi(argv[1]); // allocate host memory to store the input data mem_size = sizeof( REAL) * num_elements; h_data = (REAL*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = 
(int)(rand() % MAX_RAND); } break; } cudaEvent_t time_start; cudaEvent_t time_end; cudaEventCreate(&time_start); cudaEventCreate(&time_end); // compute reference solution REAL* reference = (REAL*) malloc( mem_size); // cutStartTimer(timer); cudaEventRecord(time_start, 0); computeGold( reference, h_data, num_elements); cudaEventRecord(time_end, 0); cudaEventSynchronize(time_end); cudaEventElapsedTime(&host_time, time_start, time_end); // cutStopTimer(timer); printf("\n\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); printf("Host CPU Processing time: %f (ms)\n", host_time); // allocate device memory input and output arrays REAL* d_idata = NULL; REAL* d_odata = NULL; cudaMalloc( (void**) &d_idata, mem_size); cudaMalloc( (void**) &d_odata, mem_size); // copy host memory to device input array cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice); // initialize all the other device arrays to be safe cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice); // **===-------- Allocate data structure here -----------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement //prescanArray(d_odata, d_idata, 16); // Run the prescan // CUT_SAFE_CALL(cutCreateTimer(&timer)); // cutStartTimer(timer); cudaEventRecord(time_start, 0); // **===-------- Modify the body of this function -----------===** prescanArray(d_odata, d_idata, num_elements); // **===-----------------------------------------------------------===** cudaThreadSynchronize(); cudaEventRecord(time_end, 0); cudaEventSynchronize(time_end); cudaEventElapsedTime(&device_time, time_start, time_end); cudaEventDestroy(time_start); cudaEventDestroy(time_end); // cutStopTimer(timer); printf("CUDA Processing time: %g (ms)\n", device_time); // device_time = cutGetTimerValue(timer); // printf("Speedup: 
%fX\n", host_time/device_time); // **===-------- Deallocate data structure here -----------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // copy result from device to host cudaMemcpy( h_data, d_odata, sizeof(REAL) * num_elements, cudaMemcpyDeviceToHost); // Check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7); printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED"); // cleanup memory free( h_data); free( reference); cudaFree( d_odata); cudaFree( d_idata); } unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err) { int i; int diff_count = 0; for (i = 0; i < num_elements; i++) { REAL diff = fabs(reference[i] - h_data[i]); REAL denominator = 1.f; if (denominator < fabs(reference[i])) { denominator = fabs(reference[i]); } if (!(diff / denominator < err)) { diff_count ++; } } if (diff_count > 0) { printf("Number of difference: %d\n", diff_count); return 0; } else { return 1; } }
59e6310790ef151fc46606d7600bf3a4d22634b8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> //#include <cutil.h> #include <iostream> #include <ostream> #include <fstream> //#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h" using namespace std; #define BLOCKSIZEX 256 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define XDIM 256 #define YDIM 128 #define ZDIM 32 #define TMAX 1000 #define RE 100.f//100.f; #define UMAX 0.08f #define METHOD "SHARED" //SINGLE,HYB,TEXT,SHARED //#define CHARLENGTH = XDIM-2.f; //#define BLOCKSIZE 16; //int const XDIM = 32; //int const YDIM = 32; #include <sys/time.h> #include <time.h> inline __device__ int ImageFcn(int x, int y, int z){ if(y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) return 1; else if(x == 0) return 3; else return 0; } texture<float,2,hipReadModeElementType> texRef_f0A; texture<float,2,hipReadModeElementType> texRef_f1A; texture<float,2,hipReadModeElementType> texRef_f2A; texture<float,2,hipReadModeElementType> texRef_f3A; texture<float,2,hipReadModeElementType> texRef_f4A; texture<float,2,hipReadModeElementType> texRef_f5A; texture<float,2,hipReadModeElementType> texRef_f6A; texture<float,2,hipReadModeElementType> texRef_f7A; texture<float,2,hipReadModeElementType> texRef_f8A; texture<float,2,hipReadModeElementType> texRef_f9A; texture<float,2,hipReadModeElementType> texRef_f10A; texture<float,2,hipReadModeElementType> texRef_f11A; texture<float,2,hipReadModeElementType> texRef_f12A; texture<float,2,hipReadModeElementType> texRef_f13A; texture<float,2,hipReadModeElementType> texRef_f14A; texture<float,2,hipReadModeElementType> texRef_f15A; texture<float,2,hipReadModeElementType> texRef_f16A; texture<float,2,hipReadModeElementType> texRef_f17A; texture<float,2,hipReadModeElementType> texRef_f18A; texture<float,2,hipReadModeElementType> texRef_f0B; texture<float,2,hipReadModeElementType> texRef_f1B; texture<float,2,hipReadModeElementType> texRef_f2B; texture<float,2,hipReadModeElementType> 
texRef_f3B; texture<float,2,hipReadModeElementType> texRef_f4B; texture<float,2,hipReadModeElementType> texRef_f5B; texture<float,2,hipReadModeElementType> texRef_f6B; texture<float,2,hipReadModeElementType> texRef_f7B; texture<float,2,hipReadModeElementType> texRef_f8B; texture<float,2,hipReadModeElementType> texRef_f9B; texture<float,2,hipReadModeElementType> texRef_f10B; texture<float,2,hipReadModeElementType> texRef_f11B; texture<float,2,hipReadModeElementType> texRef_f12B; texture<float,2,hipReadModeElementType> texRef_f13B; texture<float,2,hipReadModeElementType> texRef_f14B; texture<float,2,hipReadModeElementType> texRef_f15B; texture<float,2,hipReadModeElementType> texRef_f16B; texture<float,2,hipReadModeElementType> texRef_f17B; texture<float,2,hipReadModeElementType> texRef_f18B; int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } inline __device__ void bgk_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; // float usqr = u*u+v*v+w*w; float usqr = fma(u,u,fma(v,v,w*w)); // f0 -= omega*fma(-0.3333333333f,(fma(-1.5f,usqr,rho)),f0);//(f0 -0.3333333333f*(fma(-1.5f,usqr,rho)));//rho-1.5f*usqr)); // f1 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f1);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f2 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f2);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f3 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f3);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f4 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f4);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f5 -= omega*fma(-0.0555555556f,fma(3.0f,( u+v),rho)+fma(4.5f,( u+v)*( u+v),-1.5f*usqr),f5 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f6 -= omega*fma(-0.0555555556f,fma(3.0f,(-u+v),rho)+fma(4.5f,(-u+v)*(-u+v),-1.5f*usqr),f6 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f7 -= omega*fma(-0.0555555556f,fma(3.0f,(-u-v),rho)+fma(4.5f,(-u-v)*(-u-v),-1.5f*usqr),f7 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f8 -= omega*fma(-0.0555555556f,fma(3.0f,( u-v),rho)+fma(4.5f,( u-v)*( u-v),-1.5f*usqr),f8 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f9 -= omega*fma(-0.0555555556f,fma(3.0f,( w),rho)+fma(4.5f,( w)*( w),-1.5f*usqr),f9 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); 
// f10-= omega*fma(-0.0277777778f,fma(3.0f,( u+w),rho)+fma(4.5f,( u+w)*( u+w),-1.5f*usqr),f10);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f11-= omega*fma(-0.0277777778f,fma(3.0f,( v+w),rho)+fma(4.5f,( v+w)*( v+w),-1.5f*usqr),f11);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f12-= omega*fma(-0.0277777778f,fma(3.0f,(-u+w),rho)+fma(4.5f,(-u+w)*(-u+w),-1.5f*usqr),f12);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f13-= omega*fma(-0.0277777778f,fma(3.0f,(-v+w),rho)+fma(4.5f,(-v+w)*(-v+w),-1.5f*usqr),f13);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f14-= omega*fma(-0.0555555556f,fma(3.0f,( -w),rho)+fma(4.5f,( -w)*( -w),-1.5f*usqr),f14);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f15-= omega*fma(-0.0277777778f,fma(3.0f,( u-w),rho)+fma(4.5f,( u-w)*( u-w),-1.5f*usqr),f15);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f16-= omega*fma(-0.0277777778f,fma(3.0f,( v-w),rho)+fma(4.5f,( v-w)*( v-w),-1.5f*usqr),f16);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f17-= omega*fma(-0.0277777778f,fma(3.0f,(-u-w),rho)+fma(4.5f,(-u-w)*(-u-w),-1.5f*usqr),f17);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f18-= omega*fma(-0.0277777778f,fma(3.0f,(-v-w),rho)+fma(4.5f,(-v-w)*(-v-w),-1.5f*usqr),f18);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr)); f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); f3 = f3 -omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); f9 
= f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr)); f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr)); f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } inline __device__ void mrt_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { //float rho,u,v,w; float u,v,w; // rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ // f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; //m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18; //m1 = -30.f*f0+-11.f*(f1+f2+f3+f4+f9+f14)+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); //m1 = -19.f*f0+ 19.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); //m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ; //m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18; //m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18; //COMPUTE M-MEQ m1 = 
-19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w); m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17; m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18; m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18 -(2.f*u*u-(v*v+w*w)); m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 -(v*v-w*w); m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 -u*v; m14 = f11 + - f13 + - f16 + f18 -v*w; m15 = f10 + - f12 + - f15 + f17 -u*w; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; //m1 -= -11.f*rho+19.f*(u*u+v*v+w*w); // m1 -= 19.f*(u*u+v*v+w*w); // m2 -= -7.53968254f*(u*u+v*v+w*w); //m4 -= -0.66666667f*u;//qx_eq //m6 -= -0.66666667f*v;//qx_eq //m8 -= -0.66666667f*w;//qx_eq // m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq // m11-= (v*v-w*w);//pww_eq // m13-= u*v;//pxy_eq // m14-= v*w;//pyz_eq // m15-= u*w;//pxz_eq f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2); f1 
-=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 
0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch) { return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM; } __device__ int dmin(int a, int b) { if (a<b) return a; else return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __global__ void simple_copy(float* fA, float* fB, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) fB[j] = fA[j];//+0.01f; } __global__ void mrt_d_hybAB(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y;//; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= 
fin[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; } f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); int im = ImageFcn(x,y,z); if(im == 1){//BB fout[j+pitch*YDIM*ZDIM*1 ] = f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= 
fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_hybBA(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; } f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5B ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z)); f8 = 
tex2D(texRef_f8B ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1)); int im = ImageFcn(x,y,z); if(im == 1){//BB fout[j+pitch*YDIM*ZDIM*1 ] = f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); 
//bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_textAB(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = tex2D(texRef_f2A ,x ,y-1+YDIM*(z)); f4 = tex2D(texRef_f4A ,x ,y+1+YDIM*(z)); f9 = tex2D(texRef_f9A ,x ,y+1+YDIM*(z-1)); f11= tex2D(texRef_f11A,x ,y-1+YDIM*(z-1)); f13= tex2D(texRef_f13A,x ,y+1+YDIM*(z-1)); f14= tex2D(texRef_f14A,x ,y +YDIM*(z+1)); f16= tex2D(texRef_f16A,x ,y-1+YDIM*(z+1)); f18= tex2D(texRef_f18A,x ,y+1+YDIM*(z+1)); f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); int im = ImageFcn(x,y,z); if(im == 1){//BB fout[j+pitch*YDIM*ZDIM*1 ] 
= f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 
,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_textBA(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = tex2D(texRef_f2B ,x ,y-1+YDIM*(z)); f4 = tex2D(texRef_f4B ,x ,y+1+YDIM*(z)); f9 = tex2D(texRef_f9B ,x ,y+1+YDIM*(z-1)); f11= tex2D(texRef_f11B,x ,y-1+YDIM*(z-1)); f13= tex2D(texRef_f13B,x ,y+1+YDIM*(z-1)); f14= tex2D(texRef_f14B,x ,y +YDIM*(z+1)); f16= tex2D(texRef_f16B,x ,y-1+YDIM*(z+1)); f18= tex2D(texRef_f18B,x ,y+1+YDIM*(z+1)); f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5B ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8B ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1)); int im = ImageFcn(x,y,z); if(im == 1){//BB fout[j+pitch*YDIM*ZDIM*1 ] = f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = 
f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; 
fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_shared(float* fA, float* fB, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); __shared__ float f1_s[BLOCKSIZEX]; __shared__ float f3_s[BLOCKSIZEX]; __shared__ float f5_s[BLOCKSIZEX]; __shared__ float f7_s[BLOCKSIZEX]; __shared__ float f6_s[BLOCKSIZEX]; __shared__ float f8_s[BLOCKSIZEX]; __shared__ float f10_s[BLOCKSIZEX]; __shared__ float f12_s[BLOCKSIZEX]; __shared__ float f15_s[BLOCKSIZEX]; __shared__ float f17_s[BLOCKSIZEX]; f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) // if(y != 0){//takin these out was good f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y-1,z ,pitch)];//dmin(x+1,XDIM) // } // if(y != YDIM-1){ f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y+1,z ,pitch)];//dmax(x-1) // } // if(z != 0){ f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) // } // if(z != ZDIM-1){ f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) // } __syncthreads(); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(y != 0){ f4 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // else f4 = 0.f; // if(y != YDIM-1){ f2 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } // else f2 = 0.f; // if(z != ZDIM-1){ f9 = fA[f_mem(14,x ,y ,z+1,pitch)]; f13= fA[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f11= fA[f_mem(18,x 
,y+1,z+1,pitch)]; } // } // else{ // f9 = 0.f; // f13= 0.f; // f11= 0.f; // } // else // f18 = 0.0f; // if(z != 0){ f14= fA[f_mem(9 ,x ,y ,z-1,pitch)]; f18= fA[f_mem(11,x ,y-1,z-1,pitch)]; f16= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } // else{ // f14= 0.f; // f18= 0.f; // f16= 0.f; // } if(threadIdx.x != XDIM-1){ f1 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; f5 = f7_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; f8 = f6_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; f10=f17_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; f15=f12_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f3 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; f7 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; f6 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; f17=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; f12=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; } fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fA[j]; // if(y != 0){ f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // if(y != YDIM-1){ f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } // if(z != ZDIM-1){ f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; } // } // else{ // f18 
= 0.0f; // } // if(z != 0){ f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } if(threadIdx.x != XDIM-1){ f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; } if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; 
fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } } //{ // int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem // int y = threadIdx.y+blockIdx.y*blockDim.y; // int z = threadIdx.z+blockIdx.z*blockDim.z; // int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // int im = ImageFcn(x,y,z); // // __shared__ float f1_s[BLOCKSIZEX]; // __shared__ float f3_s[BLOCKSIZEX]; // __shared__ float f5_s[BLOCKSIZEX]; // __shared__ float f7_s[BLOCKSIZEX]; // __shared__ float f6_s[BLOCKSIZEX]; // __shared__ float f8_s[BLOCKSIZEX]; // __shared__ float f10_s[BLOCKSIZEX]; // __shared__ float f12_s[BLOCKSIZEX]; // __shared__ float f15_s[BLOCKSIZEX]; // __shared__ float f17_s[BLOCKSIZEX]; // // f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) // f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) //// if(y != 0){//takin these out was good // f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) // f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y-1,z ,pitch)];//dmin(x+1,XDIM) //// } //// if(y != YDIM-1){ // f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) // f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y+1,z ,pitch)];//dmax(x-1) //// } //// if(z != 0){ // f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) // f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) //// } //// if(z != ZDIM-1){ // f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) // 
f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) //// } // __syncthreads(); // // float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // // f0 = fA[j]; // f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // f14= fA[f_mem(14,x ,y ,z+1,pitch)]; // f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; // if(z != ZDIM-1){ // f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; // } //// else //// f18 = 0.0f; // // f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; // f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; // f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // // if(threadIdx.x != XDIM-1){ // f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; // } // if(threadIdx.x != 0){ // f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // } // // if(im == 1){//BB // fB[f_mem(1 ,x,y,z,pitch)] = f3 ; // fB[f_mem(2 ,x,y,z,pitch)] = f4 ; // fB[f_mem(3 ,x,y,z,pitch)] = f1 ; // fB[f_mem(4 ,x,y,z,pitch)] = f2 ; // fB[f_mem(5 ,x,y,z,pitch)] = f7 ; // fB[f_mem(6 ,x,y,z,pitch)] = f8 ; // fB[f_mem(7 ,x,y,z,pitch)] = f5 ; // fB[f_mem(8 ,x,y,z,pitch)] = f6 ; // fB[f_mem(9 ,x,y,z,pitch)] = f14; // fB[f_mem(10,x,y,z,pitch)] = f17; // fB[f_mem(11,x,y,z,pitch)] = f18; // fB[f_mem(12,x,y,z,pitch)] = f15; // fB[f_mem(13,x,y,z,pitch)] = f16; // fB[f_mem(14,x,y,z,pitch)] = f9 ; // fB[f_mem(15,x,y,z,pitch)] = f12; // fB[f_mem(16,x,y,z,pitch)] = f13; // fB[f_mem(17,x,y,z,pitch)] = f10; // 
fB[f_mem(18,x,y,z,pitch)] = f11; // } // else{ // if(im == 3)//DirichletWest // { // if(y == 0){ //// f2 = f4; // f6 = f7; //// f11 = f13; //// f16 = f18; // } // else if(y == YDIM-1){ //// f4 = f2; // f7 = f6; //// f13 = f11; //// f18 = f16; // } // if(z == 0){ //// f9 = f14; //// f10 = f15; //// f11 = f16; // f12 = f17; //// f13 = f18; // } // else if(z == ZDIM-1){ //// f14 = f9; //// f15 = f10; //// f16 = f11; // f17 = f12; //// f18 = f13; // } // float u,v,w;//,rho; // u = 0.0f;//*PoisProf(zcoord)*1.5; // v = UMAX;//0.0; // w = 0.0f; // // f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // // } // // mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // // // //fB[f_mem(0 ,x,y,z,pitch)] = f0 ; // fB[j] = f0 ; // fB[f_mem(1 ,x,y,z,pitch)] = f1 ; // fB[f_mem(2 ,x,y,z,pitch)] = f2 ; // fB[f_mem(3 ,x,y,z,pitch)] = f3 ; // fB[f_mem(4 ,x,y,z,pitch)] = f4 ; // fB[f_mem(5 ,x,y,z,pitch)] = f5 ; // fB[f_mem(6 ,x,y,z,pitch)] = f6 ; // fB[f_mem(7 ,x,y,z,pitch)] = f7 ; // fB[f_mem(8 ,x,y,z,pitch)] = f8 ; // fB[f_mem(9 ,x,y,z,pitch)] = f9 ; // fB[f_mem(10,x,y,z,pitch)] = f10; // fB[f_mem(11,x,y,z,pitch)] = f11; // fB[f_mem(12,x,y,z,pitch)] = f12; // fB[f_mem(13,x,y,z,pitch)] = f13; // fB[f_mem(14,x,y,z,pitch)] = f14; // fB[f_mem(15,x,y,z,pitch)] = f15; // fB[f_mem(16,x,y,z,pitch)] = f16; // fB[f_mem(17,x,y,z,pitch)] = f17; // fB[f_mem(18,x,y,z,pitch)] = f18; // } 
//} __global__ void mrt_d_single(float* fA, float* fB, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fA[j]; f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)]; f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)]; f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)]; f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)]; f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)]; f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)]; f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f10= fA[f_mem(10,x-1,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f12= fA[f_mem(12,x+1,y ,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f15= fA[f_mem(15,x-1,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; f17= fA[f_mem(17,x+1,y ,z+1,pitch)]; f18= fA[f_mem(18,x ,y+1,dmin(z+1,ZDIM),pitch)]; if(im == 1){//BB fB[f_mem(1 ,x,y,z,pitch)] = f3 ; fB[f_mem(2 ,x,y,z,pitch)] = f4 ; fB[f_mem(3 ,x,y,z,pitch)] = f1 ; fB[f_mem(4 ,x,y,z,pitch)] = f2 ; fB[f_mem(5 ,x,y,z,pitch)] = f7 ; fB[f_mem(6 ,x,y,z,pitch)] = f8 ; fB[f_mem(7 ,x,y,z,pitch)] = f5 ; fB[f_mem(8 ,x,y,z,pitch)] = f6 ; fB[f_mem(9 ,x,y,z,pitch)] = f14; fB[f_mem(10,x,y,z,pitch)] = f17; fB[f_mem(11,x,y,z,pitch)] = f18; fB[f_mem(12,x,y,z,pitch)] = f15; fB[f_mem(13,x,y,z,pitch)] = f16; fB[f_mem(14,x,y,z,pitch)] = f9 ; fB[f_mem(15,x,y,z,pitch)] = f12; fB[f_mem(16,x,y,z,pitch)] = f13; fB[f_mem(17,x,y,z,pitch)] = f10; fB[f_mem(18,x,y,z,pitch)] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } else if(z == ZDIM-1){ //f14 = f9; 
//f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void initialize_single(float *f, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float u,v,w,rho,usqr; rho = 1.f; u = 0.0f; v = 0.0f; w = 0.0f; //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr); f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); 
f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } __global__ void initialize(float* f0, float* f1, float* f2, float* f3, float* f4, float* f5, float* f6, float* f7, float* f8, float* f9, float* f10, float* f11, float* f12, float* f13, float* f14, float* f15, float* f16, float* f17, float* f18, size_t pitch)//pitch in elements //__global__ void initialize(void** f0in, void** f1in, // int w, int h, int pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; // int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // 
f1out[j] = tex2D(texRef_f2A,x,y+h*z); float u,v,w,rho,feq,usqr; rho = 1.0f; u = 0.0f; v = 0.0f; w = 0.0f; //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; feq = 1.0f/3.0f*(rho-1.5f*usqr); f0[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f1[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f2[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f3[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f4[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f5[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f6[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f7[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f8[j] = feq; feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f9[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f10[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f11[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f12[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f13[j] = feq; feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f14[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f15[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f16[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f17[j] = feq; feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f18[j] = feq; } int main(int argc, char *argv[]) { // float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h, *f9_h; // float *f10_h, *f11_h, *f12_h, *f13_h, *f14_h, *f15_h, *f16_h, *f17_h, *f18_h; // float *f0_dA, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA, *f9_dA; // float *f10_dA, *f11_dA, *f12_dA, *f13_dA, *f14_dA, *f15_dA, *f16_dA, *f17_dA, *f18_dA; // float *f0_dB, *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB, *f9_dB; // float *f10_dB, *f11_dB, 
*f12_dB, *f13_dB, *f14_dB, *f15_dB, *f16_dB, *f17_dB, *f18_dB; int *image_d, *image_h; //hipPitchedPtr f0_d; ofstream output; output.open ("LBM1_out.dat"); size_t memsize, memsize_int; size_t pitch; int i, n, nBlocks; float omega, CharLength; CharLength = XDIM-2.f; omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); cout<<"omega: "<<omega<<endl; cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; cout<<"TMAX: "<<TMAX<<endl; nBlocks = (XDIM/BLOCKSIZEX+XDIM%BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY) *(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ); int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ; n = nBlocks*B;//block*dimx*dimy cout<<"nBlocks:"<<nBlocks<<endl; dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); dim3 grid(XDIM/BLOCKSIZEX,YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ); memsize = n*sizeof(float); memsize_int = n*sizeof(int); hipExtent extent = make_hipExtent(XDIM*sizeof(float),YDIM,ZDIM); image_h = (int *)malloc(memsize_int); float *fA_h,*fA_d,*fB_d; fA_h = (float *)malloc(memsize*19); hipMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); hipMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); hipMalloc((void **) &image_d, memsize_int); cout<<pitch<<endl; size_t pitch_elements = pitch/sizeof(float); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); for (i = 0; i < n*19; i++) { fA_h[i] = i; } for (i = 0; i < n; i++) { int x = i%XDIM; int y = (i/XDIM)%YDIM; int z = (i/XDIM)/YDIM; fA_h[i] = 0; image_h[i] = 0; if(x < 1) image_h[i] = 1;//DirichletWest if(x > XDIM-2) image_h[i] = 1;//BB if(y < 1) image_h[i] = 1;//BB if(y > YDIM-2) image_h[i] = 1;//BB if(z < 1) image_h[i] = 1;//DirichletWest if(z > ZDIM-2) image_h[i] = 1;//BB } hipMemcpy(image_d, image_h, memsize_int, hipMemcpyHostToDevice); if(true)//texture settings { texRef_f0B.normalized = false; texRef_f1B.normalized = false; texRef_f2B.normalized = false; texRef_f3B.normalized = false; texRef_f4B.normalized = false; 
texRef_f5B.normalized = false; texRef_f6B.normalized = false; texRef_f7B.normalized = false; texRef_f8B.normalized = false; texRef_f9B.normalized = false; texRef_f10B.normalized = false; texRef_f11B.normalized = false; texRef_f12B.normalized = false; texRef_f13B.normalized = false; texRef_f14B.normalized = false; texRef_f15B.normalized = false; texRef_f16B.normalized = false; texRef_f17B.normalized = false; texRef_f18B.normalized = false; texRef_f0B.filterMode = hipFilterModePoint; texRef_f1B.filterMode = hipFilterModePoint; texRef_f2B.filterMode = hipFilterModePoint; texRef_f3B.filterMode = hipFilterModePoint; texRef_f4B.filterMode = hipFilterModePoint; texRef_f5B.filterMode = hipFilterModePoint; texRef_f6B.filterMode = hipFilterModePoint; texRef_f7B.filterMode = hipFilterModePoint; texRef_f8B.filterMode = hipFilterModePoint; texRef_f9B.filterMode = hipFilterModePoint; texRef_f10B.filterMode = hipFilterModePoint; texRef_f11B.filterMode = hipFilterModePoint; texRef_f12B.filterMode = hipFilterModePoint; texRef_f13B.filterMode = hipFilterModePoint; texRef_f14B.filterMode = hipFilterModePoint; texRef_f15B.filterMode = hipFilterModePoint; texRef_f16B.filterMode = hipFilterModePoint; texRef_f17B.filterMode = hipFilterModePoint; texRef_f18B.filterMode = hipFilterModePoint; texRef_f0A.normalized = false; texRef_f1A.normalized = false; texRef_f2A.normalized = false; texRef_f3A.normalized = false; texRef_f4A.normalized = false; texRef_f5A.normalized = false; texRef_f6A.normalized = false; texRef_f7A.normalized = false; texRef_f8A.normalized = false; texRef_f9A.normalized = false; texRef_f10A.normalized = false; texRef_f11A.normalized = false; texRef_f12A.normalized = false; texRef_f13A.normalized = false; texRef_f14A.normalized = false; texRef_f15A.normalized = false; texRef_f16A.normalized = false; texRef_f17A.normalized = false; texRef_f18A.normalized = false; texRef_f0A.filterMode = hipFilterModePoint; texRef_f1A.filterMode = hipFilterModePoint; texRef_f2A.filterMode = 
hipFilterModePoint; texRef_f3A.filterMode = hipFilterModePoint; texRef_f4A.filterMode = hipFilterModePoint; texRef_f5A.filterMode = hipFilterModePoint; texRef_f6A.filterMode = hipFilterModePoint; texRef_f7A.filterMode = hipFilterModePoint; texRef_f8A.filterMode = hipFilterModePoint; texRef_f9A.filterMode = hipFilterModePoint; texRef_f10A.filterMode = hipFilterModePoint; texRef_f11A.filterMode = hipFilterModePoint; texRef_f12A.filterMode = hipFilterModePoint; texRef_f13A.filterMode = hipFilterModePoint; texRef_f14A.filterMode = hipFilterModePoint; texRef_f15A.filterMode = hipFilterModePoint; texRef_f16A.filterMode = hipFilterModePoint; texRef_f17A.filterMode = hipFilterModePoint; texRef_f18A.filterMode = hipFilterModePoint; } hipMemcpy2D(fA_d ,pitch,fA_h ,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice); hipMemcpy2D(fB_d ,pitch,fA_h ,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice); for (i = 0; i < n*19; i++) { fA_h[i] = 0; } if(true)//bind texture { hipBindTexture2D(0,&texRef_f0A, fA_d ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); 
hipBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f0B, fB_d ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); 
hipBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); } // initialize<<<grid, threads>>>(f0_dA.ptr, f1_dA.ptr, f2_dA.ptr, f3_dA.ptr, f4_dA.ptr, f5_dA.ptr, f6_dA.ptr, f7_dA.ptr, f8_dA.ptr, f9_dA.ptr, // f10_dA.ptr, f11_dA.ptr, f12_dA.ptr, f13_dA.ptr, f14_dA.ptr, f15_dA.ptr, f16_dA.ptr, f17_dA.ptr, f18_dA.ptr, // XDIM,YDIM,pitch); // initialize<<<grid, threads>>>(f0_dA, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA, f9_dA, // f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA, // XDIM,YDIM,pitch_elements); hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements); // hipFuncSetCacheConfig(mrt_d_single,hipFuncCachePreferL1); struct timeval tdr0,tdr1; double restime; hipDeviceSynchronize(); gettimeofday (&tdr0,NULL); for(int t = 0; t<TMAX; t=t+2){ //for(int t = 0; t<TMAX; t=t+1){ if(METHOD == "SINGLE"){ hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); } else if(METHOD == "HYB"){ hipLaunchKernelGGL(( mrt_d_hybAB), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_hybBA), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); } else if(METHOD == "TEXT"){ hipLaunchKernelGGL(( mrt_d_textAB), dim3(grid), 
dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_textBA), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); } else if(METHOD == "SHARED"){ hipLaunchKernelGGL(( mrt_d_shared), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); hipLaunchKernelGGL(( mrt_d_shared), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); } // simple_copy<<<grid, threads>>>(fA_d,fB_d,image_d,omega,UMAX,XDIM,YDIM,ZDIM,pitch_elements); // simple_copy<<<grid, threads>>>(fB_d,fA_d,image_d,omega,UMAX,XDIM,YDIM,ZDIM,pitch_elements); if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n"; } hipDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); cout<<"Time taken for main kernel: "<<restime<<" (" <<double(XDIM*YDIM*ZDIM*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl; cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl; // copytest<<<grid, threads>>>(f10_dA,test_d,XDIM,YDIM,ZDIM); //copytest<<<grid, threads>>>(test_d); //copytest<<<grid, threads>>>(image_d); hipUnbindTexture(texRef_f0A); hipUnbindTexture(texRef_f1A); hipUnbindTexture(texRef_f2A); hipUnbindTexture(texRef_f3A); hipUnbindTexture(texRef_f4A); hipUnbindTexture(texRef_f5A); hipUnbindTexture(texRef_f6A); hipUnbindTexture(texRef_f7A); hipUnbindTexture(texRef_f8A); hipUnbindTexture(texRef_f9A); hipUnbindTexture(texRef_f10A); hipUnbindTexture(texRef_f11A); hipUnbindTexture(texRef_f12A); hipUnbindTexture(texRef_f13A); hipUnbindTexture(texRef_f14A); hipUnbindTexture(texRef_f15A); hipUnbindTexture(texRef_f16A); hipUnbindTexture(texRef_f17A); hipUnbindTexture(texRef_f18A); hipUnbindTexture(texRef_f0B); hipUnbindTexture(texRef_f1B); hipUnbindTexture(texRef_f2B); hipUnbindTexture(texRef_f3B); hipUnbindTexture(texRef_f4B); hipUnbindTexture(texRef_f5B); hipUnbindTexture(texRef_f6B); hipUnbindTexture(texRef_f7B); hipUnbindTexture(texRef_f8B); hipUnbindTexture(texRef_f9B); hipUnbindTexture(texRef_f10B); 
hipUnbindTexture(texRef_f11B); hipUnbindTexture(texRef_f12B); hipUnbindTexture(texRef_f13B); hipUnbindTexture(texRef_f14B); hipUnbindTexture(texRef_f15B); hipUnbindTexture(texRef_f16B); hipUnbindTexture(texRef_f17B); hipUnbindTexture(texRef_f18B); // hipMemcpy2D(f0_h,XDIM*sizeof(float) , f0_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f1_h,XDIM*sizeof(float) , f1_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f2_h,XDIM*sizeof(float) , f2_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f3_h,XDIM*sizeof(float) , f3_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f4_h,XDIM*sizeof(float) , f4_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f5_h,XDIM*sizeof(float) , f5_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f6_h,XDIM*sizeof(float) , f6_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f7_h,XDIM*sizeof(float) , f7_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f8_h,XDIM*sizeof(float) , f8_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f9_h,XDIM*sizeof(float) , f9_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f10_h,XDIM*sizeof(float),f10_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f11_h,XDIM*sizeof(float),f11_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f12_h,XDIM*sizeof(float),f12_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f13_h,XDIM*sizeof(float),f13_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f14_h,XDIM*sizeof(float),f14_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f15_h,XDIM*sizeof(float),f15_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // 
hipMemcpy2D(f16_h,XDIM*sizeof(float),f16_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f17_h,XDIM*sizeof(float),f17_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); // hipMemcpy2D(f18_h,XDIM*sizeof(float),f18_dA,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); hipMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyDeviceToHost); // cout<<"f1_h is "<<f1_h[0]<<endl; //hipMemcpy(f0_h, f0_d.ptr, memsize, hipMemcpyDeviceToHost); hipMemcpy(image_h, image_d, memsize_int, hipMemcpyDeviceToHost); // cout<<image_h[0]<<endl; // cout<<"test_d: "<<test_h[0]<<endl; // for(i = 0; i<n; i++){ // cout<<f0_h[i]<<","; // } output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n"; int row = 0; int col = 0; int dep = 0; i = 0; float rho, u, v, w; int j; for(dep = 0; dep<ZDIM; dep++){ for(row = 0; row<YDIM; row++){ for(col = 0; col<XDIM; col++){ i = dep*XDIM*YDIM+row*XDIM+col; // rho = 0; rho = fA_h[i]; for(j = 1; j<19; j++) rho+=fA_h[i+XDIM*YDIM*ZDIM*j]; // rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+ // f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i]; u = fA_h[i+XDIM*YDIM*ZDIM*1]-fA_h[i+XDIM*YDIM*ZDIM*3]+fA_h[i+XDIM*YDIM*ZDIM*5]-fA_h[i+XDIM*YDIM*ZDIM*6]- fA_h[i+XDIM*YDIM*ZDIM*7]+fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*10]-fA_h[i+XDIM*YDIM*ZDIM*12] +fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*17]; v = fA_h[i+XDIM*YDIM*ZDIM*2]-fA_h[i+XDIM*YDIM*ZDIM*4]+fA_h[i+XDIM*YDIM*ZDIM*5]+fA_h[i+XDIM*YDIM*ZDIM*6]-fA_h[i+XDIM*YDIM*ZDIM*7]-fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*11]-fA_h[i+XDIM*YDIM*ZDIM*13]+fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*18]; w = 
fA_h[i+XDIM*YDIM*ZDIM*9]+fA_h[i+XDIM*YDIM*ZDIM*10]+fA_h[i+XDIM*YDIM*ZDIM*11]+fA_h[i+XDIM*YDIM*ZDIM*12]+fA_h[i+XDIM*YDIM*ZDIM*13]-fA_h[i+XDIM*YDIM*ZDIM*14]-fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*17]-fA_h[i+XDIM*YDIM*ZDIM*18]; output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl; // output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+XDIM*YDIM*ZDIM*1]<<","<<rho<<endl; } } } output.close(); // cout<<endl<<fA_h[1280+81920]; // cout<<endl<<fA_h[1281+81920]; // cout<<endl<<fA_h[1282+81920]; hipFree(image_d); // hipFree(f0_dA); // hipFree(f1_dA); // hipFree(f2_dA); // hipFree(f3_dA); // hipFree(f4_dA); // hipFree(f5_dA); // hipFree(f6_dA); // hipFree(f7_dA); // hipFree(f8_dA); // hipFree(f9_dA); // hipFree(f10_dA); // hipFree(f11_dA); // hipFree(f12_dA); // hipFree(f13_dA); // hipFree(f14_dA); // hipFree(f15_dA); // hipFree(f16_dA); // hipFree(f17_dA); // hipFree(f18_dA); // hipFree(f0_dB); // hipFree(f1_dB); // hipFree(f2_dB); // hipFree(f3_dB); // hipFree(f4_dB); // hipFree(f5_dB); // hipFree(f6_dB); // hipFree(f7_dB); // hipFree(f8_dB); // hipFree(f9_dB); // hipFree(f10_dB); // hipFree(f11_dB); // hipFree(f12_dB); // hipFree(f13_dB); // hipFree(f14_dB); // hipFree(f15_dB); // hipFree(f16_dB); // hipFree(f17_dB); // hipFree(f18_dB); hipFree(fA_d); hipFree(fB_d); return(0); }
59e6310790ef151fc46606d7600bf3a4d22634b8.cu
#include <cuda.h> //#include <cutil.h> #include <iostream> #include <ostream> #include <fstream> //#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h" using namespace std; #define BLOCKSIZEX 256 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define XDIM 256 #define YDIM 128 #define ZDIM 32 #define TMAX 1000 #define RE 100.f//100.f; #define UMAX 0.08f #define METHOD "SHARED" //SINGLE,HYB,TEXT,SHARED //#define CHARLENGTH = XDIM-2.f; //#define BLOCKSIZE 16; //int const XDIM = 32; //int const YDIM = 32; #include <sys/time.h> #include <time.h> inline __device__ int ImageFcn(int x, int y, int z){ if(y == 0 || z == 0 || x == XDIM-1 || y == YDIM-1 || z == ZDIM-1) return 1; else if(x == 0) return 3; else return 0; } texture<float,2,cudaReadModeElementType> texRef_f0A; texture<float,2,cudaReadModeElementType> texRef_f1A; texture<float,2,cudaReadModeElementType> texRef_f2A; texture<float,2,cudaReadModeElementType> texRef_f3A; texture<float,2,cudaReadModeElementType> texRef_f4A; texture<float,2,cudaReadModeElementType> texRef_f5A; texture<float,2,cudaReadModeElementType> texRef_f6A; texture<float,2,cudaReadModeElementType> texRef_f7A; texture<float,2,cudaReadModeElementType> texRef_f8A; texture<float,2,cudaReadModeElementType> texRef_f9A; texture<float,2,cudaReadModeElementType> texRef_f10A; texture<float,2,cudaReadModeElementType> texRef_f11A; texture<float,2,cudaReadModeElementType> texRef_f12A; texture<float,2,cudaReadModeElementType> texRef_f13A; texture<float,2,cudaReadModeElementType> texRef_f14A; texture<float,2,cudaReadModeElementType> texRef_f15A; texture<float,2,cudaReadModeElementType> texRef_f16A; texture<float,2,cudaReadModeElementType> texRef_f17A; texture<float,2,cudaReadModeElementType> texRef_f18A; texture<float,2,cudaReadModeElementType> texRef_f0B; texture<float,2,cudaReadModeElementType> texRef_f1B; texture<float,2,cudaReadModeElementType> texRef_f2B; texture<float,2,cudaReadModeElementType> texRef_f3B; texture<float,2,cudaReadModeElementType> 
texRef_f4B; texture<float,2,cudaReadModeElementType> texRef_f5B; texture<float,2,cudaReadModeElementType> texRef_f6B; texture<float,2,cudaReadModeElementType> texRef_f7B; texture<float,2,cudaReadModeElementType> texRef_f8B; texture<float,2,cudaReadModeElementType> texRef_f9B; texture<float,2,cudaReadModeElementType> texRef_f10B; texture<float,2,cudaReadModeElementType> texRef_f11B; texture<float,2,cudaReadModeElementType> texRef_f12B; texture<float,2,cudaReadModeElementType> texRef_f13B; texture<float,2,cudaReadModeElementType> texRef_f14B; texture<float,2,cudaReadModeElementType> texRef_f15B; texture<float,2,cudaReadModeElementType> texRef_f16B; texture<float,2,cudaReadModeElementType> texRef_f17B; texture<float,2,cudaReadModeElementType> texRef_f18B; int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/
  return x->tv_sec < y->tv_sec;
}

// BGK (single-relaxation-time) collision for the D3Q19 lattice.
// All 19 distribution values are relaxed in place toward the second-order
// equilibrium; omega is the relaxation rate (1/tau).
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
    float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 ,
    float& f9, float& f10, float& f11, float& f12, float& f13, float& f14,
    float& f15, float& f16, float& f17, float& f18, float omega)
{
    float rho,u,v,w;
    // zeroth moment (density) and first moments (momentum) of the D3Q19 set
    rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
          f10+f11+f12+f13+f14+f15+f16+f17+f18;
    u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
    v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
    w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
    float usqr = fma(u,u,fma(v,v,w*w));
    f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
    f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
    f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
    f3 = f3 -omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
    f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
    f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
    f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
    f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
    f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
    f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
    f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
    // FIX: the quadratic equilibrium terms for f11 and f13 previously read
    // 4.5f*(v+w)*(u+w) and 4.5f*(-v+w)*(u+w); every other direction squares
    // its own velocity projection, so these must be (v+w)^2 and (-v+w)^2.
    f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
    f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
    f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
    f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
    f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
    f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
    f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
    f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}

// Multiple-relaxation-time (MRT) collision for D3Q19: relaxes moments of the
// distribution rather than the distributions themselves. Density is not
// needed explicitly; only the velocity moments u, v, w are formed here.
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
    float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 ,
    float& f9, float& f10, float& f11, float& f12, float& f13, float& f14,
    float& f15, float& f16, float& f17, float& f18, float omega)
{
    float u,v,w;
    u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
    v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
    w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
    float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
    //COMPUTE M-MEQ
    m1 =
-19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w); m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17; m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18; m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18 -(2.f*u*u-(v*v+w*w)); m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 -(v*v-w*w); m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 -u*v; m14 = f11 + - f13 + - f16 + f18 -v*w; m15 = f10 + - f12 + - f15 + f17 -u*w; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; //m1 -= -11.f*rho+19.f*(u*u+v*v+w*w); // m1 -= 19.f*(u*u+v*v+w*w); // m2 -= -7.53968254f*(u*u+v*v+w*w); //m4 -= -0.66666667f*u;//qx_eq //m6 -= -0.66666667f*v;//qx_eq //m8 -= -0.66666667f*w;//qx_eq // m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq // m11-= (v*v-w*w);//pww_eq // m13-= u*v;//pxy_eq // m14-= v*w;//pyz_eq // m15-= u*w;//pxz_eq f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2); f1 
-=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 
0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch) { return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM; } __device__ int dmin(int a, int b) { if (a<b) return a; else return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __global__ void simple_copy(float* fA, float* fB, int *image, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) fB[j] = fA[j];//+0.01f; } __global__ void mrt_d_hybAB(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y;//; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= 
fin[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; } f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); int im = ImageFcn(x,y,z); if(im == 1){//BB fout[j+pitch*YDIM*ZDIM*1 ] = f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= 
fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_hybBA(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; } f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5B ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z)); f8 = 
tex2D(texRef_f8B ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1)); int im = ImageFcn(x,y,z); if(im == 1){//BB fout[j+pitch*YDIM*ZDIM*1 ] = f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); 
//bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_textAB(float* fin, float* fout, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fin[j]; f2 = tex2D(texRef_f2A ,x ,y-1+YDIM*(z)); f4 = tex2D(texRef_f4A ,x ,y+1+YDIM*(z)); f9 = tex2D(texRef_f9A ,x ,y+1+YDIM*(z-1)); f11= tex2D(texRef_f11A,x ,y-1+YDIM*(z-1)); f13= tex2D(texRef_f13A,x ,y+1+YDIM*(z-1)); f14= tex2D(texRef_f14A,x ,y +YDIM*(z+1)); f16= tex2D(texRef_f16A,x ,y-1+YDIM*(z+1)); f18= tex2D(texRef_f18A,x ,y+1+YDIM*(z+1)); f1 = tex2D(texRef_f1A ,x-1,y +YDIM*(z)); f3 = tex2D(texRef_f3A ,x+1,y +YDIM*(z)); f5 = tex2D(texRef_f5A ,x-1,y-1+YDIM*(z)); f6 = tex2D(texRef_f6A ,x+1,y-1+YDIM*(z)); f7 = tex2D(texRef_f7A ,x+1,y+1+YDIM*(z)); f8 = tex2D(texRef_f8A ,x-1,y+1+YDIM*(z)); f15= tex2D(texRef_f15A,x-1,y +YDIM*(z+1)); f17= tex2D(texRef_f17A,x+1,y +YDIM*(z+1)); f10= tex2D(texRef_f10A,x-1,y +YDIM*(z-1)); f12= tex2D(texRef_f12A,x+1,y +YDIM*(z-1)); int im = ImageFcn(x,y,z); if(im == 1){//BB fout[j+pitch*YDIM*ZDIM*1 ] 
= f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 
,x,y,z,pitch)] = f6 ;
 fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
 fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
 fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
 fout[f_mem(10,x,y,z,pitch)] = f10;
 fout[f_mem(11,x,y,z,pitch)] = f11;
 fout[f_mem(12,x,y,z,pitch)] = f12;
 fout[f_mem(13,x,y,z,pitch)] = f13;
 fout[f_mem(14,x,y,z,pitch)] = f14;
 fout[f_mem(15,x,y,z,pitch)] = f15;
 fout[f_mem(16,x,y,z,pitch)] = f16;
 fout[f_mem(17,x,y,z,pitch)] = f17;
 fout[f_mem(18,x,y,z,pitch)] = f18;
 }
}

// Pull-streaming + collision step with every neighbor read going through the
// "B" texture set (ping-pong partner of mrt_d_textAB). One thread per lattice
// node; 2D textures are indexed as (x, y + YDIM*z). pitch is in elements.
__global__ void mrt_d_textBA(float* fin, float* fout, float omega, size_t pitch)//pitch in elements
{
 int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
 int y = threadIdx.y+blockIdx.y*blockDim.y;
 int z = threadIdx.z+blockIdx.z*blockDim.z;
 int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
 float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
 f0 = fin[j];
 f2 = tex2D(texRef_f2B ,x ,y-1+YDIM*(z));
 f4 = tex2D(texRef_f4B ,x ,y+1+YDIM*(z));
 // FIX: direction 9 is (0,0,+1), so f9 is pulled from (x, y, z-1). This fetch
 // previously used y+1 (copy-paste from the f13 line below), matching the same
 // bug fixed in mrt_d_textAB; compare mrt_d_hybBA's fin[f_mem(9,x,y,z-1,pitch)].
 f9 = tex2D(texRef_f9B ,x ,y +YDIM*(z-1));
 f11= tex2D(texRef_f11B,x ,y-1+YDIM*(z-1));
 f13= tex2D(texRef_f13B,x ,y+1+YDIM*(z-1));
 f14= tex2D(texRef_f14B,x ,y +YDIM*(z+1));
 f16= tex2D(texRef_f16B,x ,y-1+YDIM*(z+1));
 f18= tex2D(texRef_f18B,x ,y+1+YDIM*(z+1));
 f1 = tex2D(texRef_f1B ,x-1,y +YDIM*(z));
 f3 = tex2D(texRef_f3B ,x+1,y +YDIM*(z));
 f5 = tex2D(texRef_f5B ,x-1,y-1+YDIM*(z));
 f6 = tex2D(texRef_f6B ,x+1,y-1+YDIM*(z));
 f7 = tex2D(texRef_f7B ,x+1,y+1+YDIM*(z));
 f8 = tex2D(texRef_f8B ,x-1,y+1+YDIM*(z));
 f15= tex2D(texRef_f15B,x-1,y +YDIM*(z+1));
 f17= tex2D(texRef_f17B,x+1,y +YDIM*(z+1));
 f10= tex2D(texRef_f10B,x-1,y +YDIM*(z-1));
 f12= tex2D(texRef_f12B,x+1,y +YDIM*(z-1));
 int im = ImageFcn(x,y,z);
 if(im == 1){//BB: solid node — bounce-back, each population stored to its opposite slot
 fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
 fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
 fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
 fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
 fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
 fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
 fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
 fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
 fout[j+pitch*YDIM*ZDIM*9 ] = f14;
 fout[j+pitch*YDIM*ZDIM*10] =
f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ if(im == 3)//DirichletWest { if(y == 0){ //f2 = f4; f6 = f7; //f11 = f13; //f16 = f18; } else if(y == YDIM-1){ //f4 = f2; f7 = f6; //f13 = f11; //f18 = f16; } if(z == 0){ //f9 = f14; //f10 = f15; //f11 = f16; f12 = f17; //f13 = f18; } if(z == ZDIM-1){ //f14 = f9; //f15 = f10; //f16 = f11; f17 = f12; //f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; //rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; 
fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void mrt_d_shared(float* fA, float* fB, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); __shared__ float f1_s[BLOCKSIZEX]; __shared__ float f3_s[BLOCKSIZEX]; __shared__ float f5_s[BLOCKSIZEX]; __shared__ float f7_s[BLOCKSIZEX]; __shared__ float f6_s[BLOCKSIZEX]; __shared__ float f8_s[BLOCKSIZEX]; __shared__ float f10_s[BLOCKSIZEX]; __shared__ float f12_s[BLOCKSIZEX]; __shared__ float f15_s[BLOCKSIZEX]; __shared__ float f17_s[BLOCKSIZEX]; f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) // if(y != 0){//takin these out was good f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y-1,z ,pitch)];//dmin(x+1,XDIM) // } // if(y != YDIM-1){ f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y+1,z ,pitch)];//dmax(x-1) // } // if(z != 0){ f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) // } // if(z != ZDIM-1){ f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) // } __syncthreads(); if(im == 1){//BB float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(y != 0){ f4 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // else f4 = 0.f; // if(y != YDIM-1){ f2 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } // else f2 = 0.f; // if(z != ZDIM-1){ f9 = fA[f_mem(14,x ,y ,z+1,pitch)]; f13= fA[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f11= fA[f_mem(18,x 
,y+1,z+1,pitch)]; } // } // else{ // f9 = 0.f; // f13= 0.f; // f11= 0.f; // } // else // f18 = 0.0f; // if(z != 0){ f14= fA[f_mem(9 ,x ,y ,z-1,pitch)]; f18= fA[f_mem(11,x ,y-1,z-1,pitch)]; f16= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } // else{ // f14= 0.f; // f18= 0.f; // f16= 0.f; // } if(threadIdx.x != XDIM-1){ f1 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; f5 = f7_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; f8 = f6_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; f10=f17_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; f15=f12_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f3 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; f7 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; f6 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; f17=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; f12=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; } fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } else{ float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; f0 = fA[j]; // if(y != 0){ f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // } // if(y != YDIM-1){ f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // } // if(z != ZDIM-1){ f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; if(z != ZDIM-1){ f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; } // } // else{ // f18 
= 0.0f; // } // if(z != 0){ f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // } if(threadIdx.x != XDIM-1){ f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; } if(threadIdx.x != 0){ f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; } if(im == 3)//DirichletWest { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = 0.0f;//*PoisProf(zcoord)*1.5; v = UMAX;//0.0; w = 0.0f; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); } mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; 
fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } } //{ // int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem // int y = threadIdx.y+blockIdx.y*blockDim.y; // int z = threadIdx.z+blockIdx.z*blockDim.z; // int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) // int im = ImageFcn(x,y,z); // // __shared__ float f1_s[BLOCKSIZEX]; // __shared__ float f3_s[BLOCKSIZEX]; // __shared__ float f5_s[BLOCKSIZEX]; // __shared__ float f7_s[BLOCKSIZEX]; // __shared__ float f6_s[BLOCKSIZEX]; // __shared__ float f8_s[BLOCKSIZEX]; // __shared__ float f10_s[BLOCKSIZEX]; // __shared__ float f12_s[BLOCKSIZEX]; // __shared__ float f15_s[BLOCKSIZEX]; // __shared__ float f17_s[BLOCKSIZEX]; // // f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch)];//dmax(x-1) // f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch)];//dmin(x+1,XDIM) //// if(y != 0){//takin these out was good // f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch)];//dmax(x-1) // f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y-1,z ,pitch)];//dmin(x+1,XDIM) //// } //// if(y != YDIM-1){ // f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch)];//dmin(x+1,XDIM) // f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y+1,z ,pitch)];//dmax(x-1) //// } //// if(z != 0){ // f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch)];//dmax(x-1) // f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch)];//dmin(x+1,XDIM) //// } //// if(z != ZDIM-1){ // f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch)];//dmax(x-1) // 
f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch)];//dmin(x+1,XDIM) //// } // __syncthreads(); // // float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // // f0 = fA[j]; // f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; // f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; // f14= fA[f_mem(14,x ,y ,z+1,pitch)]; // f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; // if(z != ZDIM-1){ // f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; // } //// else //// f18 = 0.0f; // // f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; // f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; // f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; // // if(threadIdx.x != XDIM-1){ // f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,YDIM,ZDIM)]; // f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,YDIM,ZDIM)]; // f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,YDIM,ZDIM)]; // f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,YDIM,ZDIM)]; // f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,YDIM,ZDIM)]; // } // if(threadIdx.x != 0){ // f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,YDIM,ZDIM)]; // f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,YDIM,ZDIM)]; // f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,YDIM,ZDIM)]; // f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,YDIM,ZDIM)]; // f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,YDIM,ZDIM)]; // } // // if(im == 1){//BB // fB[f_mem(1 ,x,y,z,pitch)] = f3 ; // fB[f_mem(2 ,x,y,z,pitch)] = f4 ; // fB[f_mem(3 ,x,y,z,pitch)] = f1 ; // fB[f_mem(4 ,x,y,z,pitch)] = f2 ; // fB[f_mem(5 ,x,y,z,pitch)] = f7 ; // fB[f_mem(6 ,x,y,z,pitch)] = f8 ; // fB[f_mem(7 ,x,y,z,pitch)] = f5 ; // fB[f_mem(8 ,x,y,z,pitch)] = f6 ; // fB[f_mem(9 ,x,y,z,pitch)] = f14; // fB[f_mem(10,x,y,z,pitch)] = f17; // fB[f_mem(11,x,y,z,pitch)] = f18; // fB[f_mem(12,x,y,z,pitch)] = f15; // fB[f_mem(13,x,y,z,pitch)] = f16; // fB[f_mem(14,x,y,z,pitch)] = f9 ; // fB[f_mem(15,x,y,z,pitch)] = f12; // fB[f_mem(16,x,y,z,pitch)] = f13; // fB[f_mem(17,x,y,z,pitch)] = f10; // 
fB[f_mem(18,x,y,z,pitch)] = f11; // } // else{ // if(im == 3)//DirichletWest // { // if(y == 0){ //// f2 = f4; // f6 = f7; //// f11 = f13; //// f16 = f18; // } // else if(y == YDIM-1){ //// f4 = f2; // f7 = f6; //// f13 = f11; //// f18 = f16; // } // if(z == 0){ //// f9 = f14; //// f10 = f15; //// f11 = f16; // f12 = f17; //// f13 = f18; // } // else if(z == ZDIM-1){ //// f14 = f9; //// f15 = f10; //// f16 = f11; // f17 = f12; //// f18 = f13; // } // float u,v,w;//,rho; // u = 0.0f;//*PoisProf(zcoord)*1.5; // v = UMAX;//0.0; // w = 0.0f; // // f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; // f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // // } // // mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); // // // //fB[f_mem(0 ,x,y,z,pitch)] = f0 ; // fB[j] = f0 ; // fB[f_mem(1 ,x,y,z,pitch)] = f1 ; // fB[f_mem(2 ,x,y,z,pitch)] = f2 ; // fB[f_mem(3 ,x,y,z,pitch)] = f3 ; // fB[f_mem(4 ,x,y,z,pitch)] = f4 ; // fB[f_mem(5 ,x,y,z,pitch)] = f5 ; // fB[f_mem(6 ,x,y,z,pitch)] = f6 ; // fB[f_mem(7 ,x,y,z,pitch)] = f7 ; // fB[f_mem(8 ,x,y,z,pitch)] = f8 ; // fB[f_mem(9 ,x,y,z,pitch)] = f9 ; // fB[f_mem(10,x,y,z,pitch)] = f10; // fB[f_mem(11,x,y,z,pitch)] = f11; // fB[f_mem(12,x,y,z,pitch)] = f12; // fB[f_mem(13,x,y,z,pitch)] = f13; // fB[f_mem(14,x,y,z,pitch)] = f14; // fB[f_mem(15,x,y,z,pitch)] = f15; // fB[f_mem(16,x,y,z,pitch)] = f16; // fB[f_mem(17,x,y,z,pitch)] = f17; // fB[f_mem(18,x,y,z,pitch)] = f18; // } 
//}

// Single-array MRT lattice-Boltzmann streaming+collision step for a
// 19-direction (D3Q19-style) lattice, pull scheme: each thread gathers the
// distributions streaming INTO node (x,y,z) from its neighbors, applies
// boundary handling, collides, and writes to fB.
// Expects a 3D launch covering XDIM x YDIM x ZDIM, one thread per node.
// pitch is in ELEMENTS (not bytes). f_mem(), ImageFcn(), mrt_collide(),
// dmin() and the XDIM/YDIM/ZDIM/UMAX constants are defined elsewhere in
// this file.
__global__ void mrt_d_single(float* fA, float* fB, float omega, size_t pitch)//pitch in elements
{
    int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
    int y = threadIdx.y+blockIdx.y*blockDim.y;
    int z = threadIdx.z+blockIdx.z*blockDim.z;
    int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
    int im = ImageFcn(x,y,z);      // node type: 0 fluid, 1 bounce-back, 3 Dirichlet (west)

    float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;

    // Gather (pull) the 19 incoming distributions from neighbor nodes.
    f0 = fA[j];
    f1 = fA[f_mem(1 ,x-1,y  ,z  ,pitch)];
    f3 = fA[f_mem(3 ,x+1,y  ,z  ,pitch)];
    f2 = fA[f_mem(2 ,x  ,y-1,z  ,pitch)];
    f5 = fA[f_mem(5 ,x-1,y-1,z  ,pitch)];
    f6 = fA[f_mem(6 ,x+1,y-1,z  ,pitch)];
    f4 = fA[f_mem(4 ,x  ,y+1,z  ,pitch)];
    f7 = fA[f_mem(7 ,x+1,y+1,z  ,pitch)];
    f8 = fA[f_mem(8 ,x-1,y+1,z  ,pitch)];
    f9 = fA[f_mem(9 ,x  ,y  ,z-1,pitch)];
    f10= fA[f_mem(10,x-1,y  ,z-1,pitch)];
    f11= fA[f_mem(11,x  ,y-1,z-1,pitch)];
    f12= fA[f_mem(12,x+1,y  ,z-1,pitch)];
    f13= fA[f_mem(13,x  ,y+1,z-1,pitch)];
    f14= fA[f_mem(14,x  ,y  ,z+1,pitch)];
    f15= fA[f_mem(15,x-1,y  ,z+1,pitch)];
    f16= fA[f_mem(16,x  ,y-1,z+1,pitch)];
    // NOTE(review): only direction 18 clamps z with dmin(); the other z+1/z-1
    // reads rely on the domain-edge nodes being flagged by ImageFcn — confirm.
    f17= fA[f_mem(17,x+1,y  ,z+1,pitch)];
    f18= fA[f_mem(18,x  ,y+1,dmin(z+1,ZDIM),pitch)];

    if(im == 1){
        // Bounce-back wall: write each distribution into its opposite direction.
        fB[f_mem(1 ,x,y,z,pitch)] = f3 ;
        fB[f_mem(2 ,x,y,z,pitch)] = f4 ;
        fB[f_mem(3 ,x,y,z,pitch)] = f1 ;
        fB[f_mem(4 ,x,y,z,pitch)] = f2 ;
        fB[f_mem(5 ,x,y,z,pitch)] = f7 ;
        fB[f_mem(6 ,x,y,z,pitch)] = f8 ;
        fB[f_mem(7 ,x,y,z,pitch)] = f5 ;
        fB[f_mem(8 ,x,y,z,pitch)] = f6 ;
        fB[f_mem(9 ,x,y,z,pitch)] = f14;
        fB[f_mem(10,x,y,z,pitch)] = f17;
        fB[f_mem(11,x,y,z,pitch)] = f18;
        fB[f_mem(12,x,y,z,pitch)] = f15;
        fB[f_mem(13,x,y,z,pitch)] = f16;
        fB[f_mem(14,x,y,z,pitch)] = f9 ;
        fB[f_mem(15,x,y,z,pitch)] = f12;
        fB[f_mem(16,x,y,z,pitch)] = f13;
        fB[f_mem(17,x,y,z,pitch)] = f10;
        fB[f_mem(18,x,y,z,pitch)] = f11;
    }
    else{
        if(im == 3)//DirichletWest: impose velocity (0, UMAX, 0)
        {
            // Patch up distributions that streamed in from outside the domain
            // by mirroring their in-domain opposites (only the x-tangential
            // ones are needed here; the rest are deliberately left as read).
            if(y == 0){
                //f2 = f4;
                f6 = f7;
                //f11 = f13;
                //f16 = f18;
            }
            else if(y == YDIM-1){
                //f4 = f2;
                f7 = f6;
                //f13 = f11;
                //f18 = f16;
            }
            if(z == 0){
                //f9  = f14;
                //f10 = f15;
                //f11 = f16;
                f12 = f17;
                //f13 = f18;
            }
            else if(z == ZDIM-1){
                //f14 = f9;
                //f15 = f10;
                //f16 = f11;
                f17 = f12;
                //f18 = f13;
            }

            float u,v,w;//,rho;
            u = 0.0f;//*PoisProf(zcoord)*1.5;
            v = UMAX;//0.0;
            w = 0.0f;

            // Zou/He-style velocity boundary: unknown (+x) populations from
            // their opposites plus a momentum correction; 1/18 and 1/36 weights.
            f1 = fma(0.0555555556f,6.0f*u    ,f3 );//0.0555555556f*(6.0f*u)+f3
            f5 = fma(0.0277777778f,6.0f*(u+v),f7 );
            f8 = fma(0.0277777778f,6.0f*(u-v),f6 );
            f10= fma(0.0277777778f,6.0f*(u+w),f17);
            f15= fma(0.0277777778f,6.0f*(u-w),f12);
        }

        mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
        //bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);

        fB[f_mem(0 ,x,y,z,pitch)] = f0 ;
        fB[f_mem(1 ,x,y,z,pitch)] = f1 ;
        fB[f_mem(2 ,x,y,z,pitch)] = f2 ;
        fB[f_mem(3 ,x,y,z,pitch)] = f3 ;
        fB[f_mem(4 ,x,y,z,pitch)] = f4 ;
        fB[f_mem(5 ,x,y,z,pitch)] = f5 ;
        fB[f_mem(6 ,x,y,z,pitch)] = f6 ;
        fB[f_mem(7 ,x,y,z,pitch)] = f7 ;
        fB[f_mem(8 ,x,y,z,pitch)] = f8 ;
        fB[f_mem(9 ,x,y,z,pitch)] = f9 ;
        fB[f_mem(10,x,y,z,pitch)] = f10;
        fB[f_mem(11,x,y,z,pitch)] = f11;
        fB[f_mem(12,x,y,z,pitch)] = f12;
        fB[f_mem(13,x,y,z,pitch)] = f13;
        fB[f_mem(14,x,y,z,pitch)] = f14;
        fB[f_mem(15,x,y,z,pitch)] = f15;
        fB[f_mem(16,x,y,z,pitch)] = f16;
        fB[f_mem(17,x,y,z,pitch)] = f17;
        fB[f_mem(18,x,y,z,pitch)] = f18;
    }
}

// Initialize all 19 distributions of a single packed array f to the
// equilibrium values for rho=1 and zero velocity. Direction d lives at
// offset d*pitch*YDIM*ZDIM from the node index j. pitch is in elements.
__global__ void initialize_single(float *f, size_t pitch)//pitch in elements
{
    int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
    int y = threadIdx.y+blockIdx.y*blockDim.y;
    int z = threadIdx.z+blockIdx.z*blockDim.z;
    int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)

    float u,v,w,rho,usqr;
    rho = 1.f;
    u = 0.0f;
    v = 0.0f;
    w = 0.0f;
    //if(x == 3 ) u = 0.1f;
    usqr = u*u+v*v+w*w;

    // Standard second-order equilibrium: w_d*(rho + 3 e.u + 4.5 (e.u)^2 - 1.5 u^2)
    f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f *(rho-1.5f*usqr);
    f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
    f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
    f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
    f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
    f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v) +4.5f*(u+v) *(u+v) -1.5f*usqr);
    f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
    f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
    f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v) +4.5f*(u-v) *(u-v) -1.5f*usqr);
    f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
    f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w) +4.5f*(u+w) *(u+w) -1.5f*usqr);
    // FIX: squared term was (v+w)*(u+w) — each direction squares its OWN projection.
    f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w) +4.5f*(v+w) *(v+w) -1.5f*usqr);
    f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
    // FIX: squared term was (-v+w)*(u+w) — same typo as direction 11.
    f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
    f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
    f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w) +4.5f*(u-w) *(u-w) -1.5f*usqr);
    f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w) +4.5f*(v-w) *(v-w) -1.5f*usqr);
    f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
    f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}

// Same equilibrium initialization as initialize_single, but for the
// 19-separate-array storage layout (one device pointer per direction).
// pitch is in elements.
__global__ void initialize(float* f0, float* f1, float* f2,
                           float* f3, float* f4, float* f5,
                           float* f6, float* f7, float* f8, float* f9,
                           float* f10, float* f11, float* f12,
                           float* f13, float* f14, float* f15,
                           float* f16, float* f17, float* f18,
                           size_t pitch)//pitch in elements
{
    int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
    int y = threadIdx.y+blockIdx.y*blockDim.y;
    int z = threadIdx.z+blockIdx.z*blockDim.z;
//  int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem
    int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
//  f1out[j] = tex2D(texRef_f2A,x,y+h*z);

    float u,v,w,rho,feq,usqr;
    rho = 1.0f;
    u = 0.0f;
    v = 0.0f;
    w = 0.0f;
    //if(x == 3 ) u = 0.1f;
    usqr = u*u+v*v+w*w;

    feq = 1.0f/3.0f *(rho-1.5f*usqr);                                    f0[j] = feq;
    feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);                    f1[j] = feq;
    feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);                    f2[j] = feq;
    feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);                    f3[j] = feq;
    feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);                    f4[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(u+v) +4.5f*(u+v) *(u+v) -1.5f*usqr);     f5[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);     f6[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);     f7[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(u-v) +4.5f*(u-v) *(u-v) -1.5f*usqr);     f8[j] = feq;
    feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);                    f9[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(u+w) +4.5f*(u+w) *(u+w) -1.5f*usqr);     f10[j] = feq;
    // FIX: squared term was (v+w)*(u+w) — copy/paste typo (see initialize_single).
    feq = 1.0f/36.0f*(rho+3.0f*(v+w) +4.5f*(v+w) *(v+w) -1.5f*usqr);     f11[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);     f12[j] = feq;
    // FIX: squared term was (-v+w)*(u+w) — same typo.
    feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);     f13[j] = feq;
    feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);                    f14[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(u-w) +4.5f*(u-w) *(u-w) -1.5f*usqr);     f15[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(v-w) +4.5f*(v-w) *(v-w) -1.5f*usqr);     f16[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);     f17[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);     f18[j] = feq;
}

// Driver: allocates the two pitched ping-pong distribution arrays, builds
// the obstacle/boundary image, configures and binds the 2D textures,
// initializes to equilibrium, runs TMAX timesteps with the kernel variant
// selected by METHOD, then writes a Tecplot POINT-format file and reports
// MLUPS. (Legacy commented-out per-direction buffers and copies removed.)
int main(int argc, char *argv[])
{
    int *image_d, *image_h;

    ofstream output;
    output.open ("LBM1_out.dat");

    size_t memsize, memsize_int;
    size_t pitch;          // row pitch in BYTES, filled in by cudaMallocPitch
    int i, n, nBlocks;
    float omega, CharLength;

    CharLength = XDIM-2.f;
    // Relaxation rate from the lattice viscosity implied by UMAX, CharLength and RE.
    omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);

    cout<<"omega: "<<omega<<endl;
    cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
    cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
    cout<<"TMAX: "<<TMAX<<endl;

    // NOTE(review): this over-counts when a dimension is not a multiple of its
    // block size (adds the remainder, not +1) — assumes exact divisibility.
    nBlocks = (XDIM/BLOCKSIZEX+XDIM%BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY)
             *(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ);
    int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ;
    n = nBlocks*B;//block*dimx*dimy
    cout<<"nBlocks:"<<nBlocks<<endl;

    dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
    dim3 grid(XDIM/BLOCKSIZEX,YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ);

    memsize     = n*sizeof(float);
    memsize_int = n*sizeof(int);

    image_h = (int *)malloc(memsize_int);

    float *fA_h,*fA_d,*fB_d;
    fA_h = (float *)malloc(memsize*19);
    // 19 direction slabs stacked along the row dimension of one pitched allocation.
    cudaMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19);
    cudaMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19);
    cudaMalloc((void **) &image_d, memsize_int);

    cout<<pitch<<endl;
    size_t pitch_elements = pitch/sizeof(float);
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();

    for (i = 0; i < n*19; i++)
    {
        fA_h[i] = i;
    }
    // Flag all six domain faces as solid (bounce-back / Dirichlet handled in-kernel).
    for (i = 0; i < n; i++)
    {
        int x = i%XDIM;
        int y = (i/XDIM)%YDIM;
        int z = (i/XDIM)/YDIM;
        fA_h[i] = 0;
        image_h[i] = 0;
        if(x < 1)      image_h[i] = 1;//DirichletWest
        if(x > XDIM-2) image_h[i] = 1;//BB
        if(y < 1)      image_h[i] = 1;//BB
        if(y > YDIM-2) image_h[i] = 1;//BB
        if(z < 1)      image_h[i] = 1;//DirichletWest
        if(z > ZDIM-2) image_h[i] = 1;//BB
    }
    cudaMemcpy(image_d, image_h, memsize_int, cudaMemcpyHostToDevice);

    if(true)//texture settings: unnormalized coords, nearest-point sampling
    {
        texRef_f0B.normalized  = false;
        texRef_f1B.normalized  = false;
        texRef_f2B.normalized  = false;
        texRef_f3B.normalized  = false;
        texRef_f4B.normalized  = false;
        texRef_f5B.normalized  = false;
        texRef_f6B.normalized  = false;
        texRef_f7B.normalized  = false;
        texRef_f8B.normalized  = false;
        texRef_f9B.normalized  = false;
        texRef_f10B.normalized = false;
        texRef_f11B.normalized = false;
        texRef_f12B.normalized = false;
        texRef_f13B.normalized = false;
        texRef_f14B.normalized = false;
        texRef_f15B.normalized = false;
        texRef_f16B.normalized = false;
        texRef_f17B.normalized = false;
        texRef_f18B.normalized = false;
        texRef_f0B.filterMode  = cudaFilterModePoint;
        texRef_f1B.filterMode  = cudaFilterModePoint;
        texRef_f2B.filterMode  = cudaFilterModePoint;
        texRef_f3B.filterMode  = cudaFilterModePoint;
        texRef_f4B.filterMode  = cudaFilterModePoint;
        texRef_f5B.filterMode  = cudaFilterModePoint;
        texRef_f6B.filterMode  = cudaFilterModePoint;
        texRef_f7B.filterMode  = cudaFilterModePoint;
        texRef_f8B.filterMode  = cudaFilterModePoint;
        texRef_f9B.filterMode  = cudaFilterModePoint;
        texRef_f10B.filterMode = cudaFilterModePoint;
        texRef_f11B.filterMode = cudaFilterModePoint;
        texRef_f12B.filterMode = cudaFilterModePoint;
        texRef_f13B.filterMode = cudaFilterModePoint;
        texRef_f14B.filterMode = cudaFilterModePoint;
        texRef_f15B.filterMode = cudaFilterModePoint;
        texRef_f16B.filterMode = cudaFilterModePoint;
        texRef_f17B.filterMode = cudaFilterModePoint;
        texRef_f18B.filterMode = cudaFilterModePoint;
        texRef_f0A.normalized  = false;
        texRef_f1A.normalized  = false;
        texRef_f2A.normalized  = false;
        texRef_f3A.normalized  = false;
        texRef_f4A.normalized  = false;
        texRef_f5A.normalized  = false;
        texRef_f6A.normalized  = false;
        texRef_f7A.normalized  = false;
        texRef_f8A.normalized  = false;
        texRef_f9A.normalized  = false;
        texRef_f10A.normalized = false;
        texRef_f11A.normalized = false;
        texRef_f12A.normalized = false;
        texRef_f13A.normalized = false;
        texRef_f14A.normalized = false;
        texRef_f15A.normalized = false;
        texRef_f16A.normalized = false;
        texRef_f17A.normalized = false;
        texRef_f18A.normalized = false;
        texRef_f0A.filterMode  = cudaFilterModePoint;
        texRef_f1A.filterMode  = cudaFilterModePoint;
        texRef_f2A.filterMode  = cudaFilterModePoint;
        texRef_f3A.filterMode  = cudaFilterModePoint;
        texRef_f4A.filterMode  = cudaFilterModePoint;
        texRef_f5A.filterMode  = cudaFilterModePoint;
        texRef_f6A.filterMode  = cudaFilterModePoint;
        texRef_f7A.filterMode  = cudaFilterModePoint;
        texRef_f8A.filterMode  = cudaFilterModePoint;
        texRef_f9A.filterMode  = cudaFilterModePoint;
        texRef_f10A.filterMode = cudaFilterModePoint;
        texRef_f11A.filterMode = cudaFilterModePoint;
        texRef_f12A.filterMode = cudaFilterModePoint;
        texRef_f13A.filterMode = cudaFilterModePoint;
        texRef_f14A.filterMode = cudaFilterModePoint;
        texRef_f15A.filterMode = cudaFilterModePoint;
        texRef_f16A.filterMode = cudaFilterModePoint;
        texRef_f17A.filterMode = cudaFilterModePoint;
        texRef_f18A.filterMode = cudaFilterModePoint;
    }

    cudaMemcpy2D(fA_d,pitch,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice);
    cudaMemcpy2D(fB_d,pitch,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice);

    for (i = 0; i < n*19; i++)
    {
        fA_h[i] = 0;
    }

    if(true)//bind textures: one 2D texture per direction slab of each buffer
    {
        cudaBindTexture2D(0,&texRef_f0A, fA_d                             ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM    ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f0B, fB_d                             ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM    ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch);
        cudaBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch);
    }

    initialize_single<<<grid, threads>>>(fA_d,pitch_elements);
//  cudaFuncSetCacheConfig(mrt_d_single,cudaFuncCachePreferL1);

    struct timeval tdr0,tdr1;
    double restime;
    cudaDeviceSynchronize();
    gettimeofday (&tdr0,NULL);

    // Two half-steps per iteration: A->B then B->A, so fA always holds the
    // latest state when the loop exits.
    for(int t = 0; t<TMAX; t=t+2){
        if(METHOD == "SINGLE"){
            mrt_d_single<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
            mrt_d_single<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
        }
        else if(METHOD == "HYB"){
            mrt_d_hybAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
            mrt_d_hybBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
        }
        else if(METHOD == "TEXT"){
            mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
            mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
        }
        else if(METHOD == "SHARED"){
            mrt_d_shared<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
            mrt_d_shared<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
        }
        if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
    }
    cudaDeviceSynchronize();
    gettimeofday (&tdr1,NULL);
    timeval_subtract (&restime, &tdr1, &tdr0);
    cout<<"Time taken for main kernel: "<<restime<<" ("
        <<double(XDIM*YDIM*ZDIM*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl;
    cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl;

    cudaUnbindTexture(texRef_f0A);
    cudaUnbindTexture(texRef_f1A);
    cudaUnbindTexture(texRef_f2A);
    cudaUnbindTexture(texRef_f3A);
    cudaUnbindTexture(texRef_f4A);
    cudaUnbindTexture(texRef_f5A);
    cudaUnbindTexture(texRef_f6A);
    cudaUnbindTexture(texRef_f7A);
    cudaUnbindTexture(texRef_f8A);
    cudaUnbindTexture(texRef_f9A);
    cudaUnbindTexture(texRef_f10A);
    cudaUnbindTexture(texRef_f11A);
    cudaUnbindTexture(texRef_f12A);
    cudaUnbindTexture(texRef_f13A);
    cudaUnbindTexture(texRef_f14A);
    cudaUnbindTexture(texRef_f15A);
    cudaUnbindTexture(texRef_f16A);
    cudaUnbindTexture(texRef_f17A);
    cudaUnbindTexture(texRef_f18A);
    cudaUnbindTexture(texRef_f0B);
    cudaUnbindTexture(texRef_f1B);
    cudaUnbindTexture(texRef_f2B);
    cudaUnbindTexture(texRef_f3B);
    cudaUnbindTexture(texRef_f4B);
    cudaUnbindTexture(texRef_f5B);
    cudaUnbindTexture(texRef_f6B);
    cudaUnbindTexture(texRef_f7B);
    cudaUnbindTexture(texRef_f8B);
    cudaUnbindTexture(texRef_f9B);
    cudaUnbindTexture(texRef_f10B);
    cudaUnbindTexture(texRef_f11B);
    cudaUnbindTexture(texRef_f12B);
    cudaUnbindTexture(texRef_f13B);
    cudaUnbindTexture(texRef_f14B);
    cudaUnbindTexture(texRef_f15B);
    cudaUnbindTexture(texRef_f16B);
    cudaUnbindTexture(texRef_f17B);
    cudaUnbindTexture(texRef_f18B);

    cudaMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyDeviceToHost);
    cudaMemcpy(image_h, image_d, memsize_int, cudaMemcpyDeviceToHost);

    // Tecplot POINT-format output: macroscopic rho, u, v, w per node.
    output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
    output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
    int row = 0;
    int col = 0;
    int dep = 0;
    i = 0;
    float rho, u, v, w;
    int j;
    for(dep = 0; dep<ZDIM; dep++){
        for(row = 0; row<YDIM; row++){
            for(col = 0; col<XDIM; col++){
                i = dep*XDIM*YDIM+row*XDIM+col;
                rho = fA_h[i];
                for(j = 1; j<19; j++)
                    rho+=fA_h[i+XDIM*YDIM*ZDIM*j];
                u = fA_h[i+XDIM*YDIM*ZDIM*1]-fA_h[i+XDIM*YDIM*ZDIM*3]+fA_h[i+XDIM*YDIM*ZDIM*5]-fA_h[i+XDIM*YDIM*ZDIM*6]-
                    fA_h[i+XDIM*YDIM*ZDIM*7]+fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*10]-fA_h[i+XDIM*YDIM*ZDIM*12]
                   +fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*17];
                v = fA_h[i+XDIM*YDIM*ZDIM*2]-fA_h[i+XDIM*YDIM*ZDIM*4]+fA_h[i+XDIM*YDIM*ZDIM*5]+fA_h[i+XDIM*YDIM*ZDIM*6]
                   -fA_h[i+XDIM*YDIM*ZDIM*7]-fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*11]-fA_h[i+XDIM*YDIM*ZDIM*13]
                   +fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*18];
                w = fA_h[i+XDIM*YDIM*ZDIM*9]+fA_h[i+XDIM*YDIM*ZDIM*10]+fA_h[i+XDIM*YDIM*ZDIM*11]+fA_h[i+XDIM*YDIM*ZDIM*12]
                   +fA_h[i+XDIM*YDIM*ZDIM*13]-fA_h[i+XDIM*YDIM*ZDIM*14]-fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*16]
                   -fA_h[i+XDIM*YDIM*ZDIM*17]-fA_h[i+XDIM*YDIM*ZDIM*18];
                output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
            }
        }
    }
    output.close();

    // FIX: host buffers were leaked — release them alongside the device memory.
    free(fA_h);
    free(image_h);
    cudaFree(image_d);
    cudaFree(fA_d);
    cudaFree(fB_d);
    return(0);
}
11d52a50fefb00a12087c540759962229a59fd02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include "largerBlocks.h" #include "sad.h" typedef struct { unsigned short x; unsigned short y; } __align__(4) uhvec; typedef unsigned int uint; __global__ void larger_sad_calc_8(unsigned short* blk_sad, int mb_width, int mb_height) { int tx = threadIdx.y & 1; int ty = threadIdx.y >> 1; /* Macroblock and sub-block coordinates */ int mb_x = blockIdx.x; int mb_y = blockIdx.y; /* Number of macroblocks in a frame */ int macroblocks = __mul24(mb_width, mb_height); int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED; int search_pos; unsigned short* bi; unsigned short *bo_6, *bo_5, *bo_4; bi = blk_sad + (__mul24(macroblocks, 25) + (ty * 8 + tx * 2)) * MAX_POS_PADDED + macroblock_index * 16; // Block type 6: 4x8 bo_6 = blk_sad + ((macroblocks << 4) + macroblocks + (ty * 4 + tx * 2)) * MAX_POS_PADDED + macroblock_index * 8; if (ty < 100) // always true, but improves register allocation { // Block type 5: 8x4 bo_5 = blk_sad + ((macroblocks << 3) + macroblocks + (ty * 4 + tx)) * MAX_POS_PADDED + macroblock_index * 8; // Block type 4: 8x8 bo_4 = blk_sad + ((macroblocks << 2) + macroblocks + (ty * 2 + tx)) * MAX_POS_PADDED + macroblock_index * 4; } for (search_pos = threadIdx.x; search_pos < (MAX_POS + 1) / 2; search_pos += 32) { /* Each uint is actually two 2-byte integers packed together. * Only addition is used and there is no chance of integer overflow * so this can be done to reduce computation time. 
*/ uint i00 = ((uint*)bi)[search_pos]; uint i01 = ((uint*)bi)[search_pos + MAX_POS_PADDED / 2]; uint i10 = ((uint*)bi)[search_pos + 4 * MAX_POS_PADDED / 2]; uint i11 = ((uint*)bi)[search_pos + 5 * MAX_POS_PADDED / 2]; ((uint*)bo_6)[search_pos] = i00 + i10; ((uint*)bo_6)[search_pos + MAX_POS_PADDED / 2] = i01 + i11; ((uint*)bo_5)[search_pos] = i00 + i01; ((uint*)bo_5)[search_pos + 2 * MAX_POS_PADDED / 2] = i10 + i11; ((uint*)bo_4)[search_pos] = (i00 + i01) + (i10 + i11); } } __global__ void larger_sad_calc_16(unsigned short* blk_sad, int mb_width, int mb_height) { /* Macroblock coordinates */ int mb_x = blockIdx.x; int mb_y = blockIdx.y; /* Number of macroblocks in a frame */ int macroblocks = __mul24(mb_width, mb_height) * MAX_POS_PADDED; int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED; int search_pos; unsigned short* bi; unsigned short *bo_3, *bo_2, *bo_1; // bi = blk_sad + macroblocks * 5 + macroblock_index * 4; bi = blk_sad + ((macroblocks + macroblock_index) << 2) + macroblocks; // Block type 3: 8x16 // bo_3 = blk_sad + macroblocks * 3 + macroblock_index * 2; bo_3 = blk_sad + ((macroblocks + macroblock_index) << 1) + macroblocks; // Block type 5: 8x4 bo_2 = blk_sad + macroblocks + macroblock_index * 2; // Block type 4: 8x8 bo_1 = blk_sad + macroblock_index; for (search_pos = threadIdx.x; search_pos < (MAX_POS + 1) / 2; search_pos += 32) { /* Each uint is actually two 2-byte integers packed together. * Only addition is used and there is no chance of integer overflow * so this can be done to reduce computation time. 
*/ uint i00 = ((uint*)bi)[search_pos]; uint i01 = ((uint*)bi)[search_pos + MAX_POS_PADDED / 2]; uint i10 = ((uint*)bi)[search_pos + 2 * MAX_POS_PADDED / 2]; uint i11 = ((uint*)bi)[search_pos + 3 * MAX_POS_PADDED / 2]; ((uint*)bo_3)[search_pos] = i00 + i10; ((uint*)bo_3)[search_pos + MAX_POS_PADDED / 2] = i01 + i11; ((uint*)bo_2)[search_pos] = i00 + i01; ((uint*)bo_2)[search_pos + MAX_POS_PADDED / 2] = i10 + i11; ((uint*)bo_1)[search_pos] = (i00 + i01) + (i10 + i11); } }
11d52a50fefb00a12087c540759962229a59fd02.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include "largerBlocks.h" #include "sad.h" typedef struct { unsigned short x; unsigned short y; } __align__(4) uhvec; typedef unsigned int uint; __global__ void larger_sad_calc_8(unsigned short* blk_sad, int mb_width, int mb_height) { int tx = threadIdx.y & 1; int ty = threadIdx.y >> 1; /* Macroblock and sub-block coordinates */ int mb_x = blockIdx.x; int mb_y = blockIdx.y; /* Number of macroblocks in a frame */ int macroblocks = __mul24(mb_width, mb_height); int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED; int search_pos; unsigned short* bi; unsigned short *bo_6, *bo_5, *bo_4; bi = blk_sad + (__mul24(macroblocks, 25) + (ty * 8 + tx * 2)) * MAX_POS_PADDED + macroblock_index * 16; // Block type 6: 4x8 bo_6 = blk_sad + ((macroblocks << 4) + macroblocks + (ty * 4 + tx * 2)) * MAX_POS_PADDED + macroblock_index * 8; if (ty < 100) // always true, but improves register allocation { // Block type 5: 8x4 bo_5 = blk_sad + ((macroblocks << 3) + macroblocks + (ty * 4 + tx)) * MAX_POS_PADDED + macroblock_index * 8; // Block type 4: 8x8 bo_4 = blk_sad + ((macroblocks << 2) + macroblocks + (ty * 2 + tx)) * MAX_POS_PADDED + macroblock_index * 4; } for (search_pos = threadIdx.x; search_pos < (MAX_POS + 1) / 2; search_pos += 32) { /* Each uint is actually two 2-byte integers packed together. * Only addition is used and there is no chance of integer overflow * so this can be done to reduce computation time. 
*/ uint i00 = ((uint*)bi)[search_pos]; uint i01 = ((uint*)bi)[search_pos + MAX_POS_PADDED / 2]; uint i10 = ((uint*)bi)[search_pos + 4 * MAX_POS_PADDED / 2]; uint i11 = ((uint*)bi)[search_pos + 5 * MAX_POS_PADDED / 2]; ((uint*)bo_6)[search_pos] = i00 + i10; ((uint*)bo_6)[search_pos + MAX_POS_PADDED / 2] = i01 + i11; ((uint*)bo_5)[search_pos] = i00 + i01; ((uint*)bo_5)[search_pos + 2 * MAX_POS_PADDED / 2] = i10 + i11; ((uint*)bo_4)[search_pos] = (i00 + i01) + (i10 + i11); } } __global__ void larger_sad_calc_16(unsigned short* blk_sad, int mb_width, int mb_height) { /* Macroblock coordinates */ int mb_x = blockIdx.x; int mb_y = blockIdx.y; /* Number of macroblocks in a frame */ int macroblocks = __mul24(mb_width, mb_height) * MAX_POS_PADDED; int macroblock_index = (__mul24(mb_y, mb_width) + mb_x) * MAX_POS_PADDED; int search_pos; unsigned short* bi; unsigned short *bo_3, *bo_2, *bo_1; // bi = blk_sad + macroblocks * 5 + macroblock_index * 4; bi = blk_sad + ((macroblocks + macroblock_index) << 2) + macroblocks; // Block type 3: 8x16 // bo_3 = blk_sad + macroblocks * 3 + macroblock_index * 2; bo_3 = blk_sad + ((macroblocks + macroblock_index) << 1) + macroblocks; // Block type 5: 8x4 bo_2 = blk_sad + macroblocks + macroblock_index * 2; // Block type 4: 8x8 bo_1 = blk_sad + macroblock_index; for (search_pos = threadIdx.x; search_pos < (MAX_POS + 1) / 2; search_pos += 32) { /* Each uint is actually two 2-byte integers packed together. * Only addition is used and there is no chance of integer overflow * so this can be done to reduce computation time. 
*/ uint i00 = ((uint*)bi)[search_pos]; uint i01 = ((uint*)bi)[search_pos + MAX_POS_PADDED / 2]; uint i10 = ((uint*)bi)[search_pos + 2 * MAX_POS_PADDED / 2]; uint i11 = ((uint*)bi)[search_pos + 3 * MAX_POS_PADDED / 2]; ((uint*)bo_3)[search_pos] = i00 + i10; ((uint*)bo_3)[search_pos + MAX_POS_PADDED / 2] = i01 + i11; ((uint*)bo_2)[search_pos] = i00 + i01; ((uint*)bo_2)[search_pos + MAX_POS_PADDED / 2] = i10 + i11; ((uint*)bo_1)[search_pos] = (i00 + i01) + (i10 + i11); } }
a192d62d47557982005a5e988cf21bc034a345b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv3/highgui/highgui.hpp> #include "gcube.h" #include "gpu_util.h" gcube::gcube(void) { this->d_pixels = NULL; this->create(0, 0, 0, gfill::none); } gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { this->d_pixels = NULL; this->create(n_rows, n_cols, n_slices, fill_type); } gcube::gcube(const gcube &gpucube) { this->d_pixels = NULL; this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none); checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToDevice)); } gcube::gcube(const std::string &fname) { this->d_pixels = NULL; this->load(fname); } //gcube::gcube(const std::vector<float> list) { // this->d_pixels = NULL; // this->create(list); //} gcube::~gcube(void) { if (this->d_pixels) { checkCudaErrors(hipFree(this->d_pixels)); } } __global__ void GPU_map_assign(float *F, float val, size_t n_elems) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_elems) { return; } F[idx] = val; } void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { if (this->d_pixels) { checkCudaErrors(hipFree(d_pixels)); } this->n_rows = n_rows; this->n_cols = n_cols; this->n_slices = n_slices; this->n_elem = n_rows * n_cols * n_slices; if (this->n_elem == 0) { this->d_pixels = NULL; } else { checkCudaErrors(hipMalloc(&this->d_pixels, this->n_elem * sizeof(float))); switch (fill_type) { case gfill::none: break; case gfill::zeros: checkCudaErrors(hipMemset(this->d_pixels, 0, this->n_elem * sizeof(float))); break; case gfill::ones: hipLaunchKernelGGL(( GPU_map_assign), dim3((this->n_elem-1) / 128 + 1), dim3(128), 0, 0, this->d_pixels, 1, this->n_elem); checkCudaErrors(hipGetLastError()); break; default: break; } } } gcube &gcube::operator=(const gcube &gpucube) { this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none); 
checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToDevice)); return *this; } void gcube::load(const std::string &fname) { this->create(cv::imread(fname)); } void gcube::save(const std::string &fname) { cv::imwrite(fname, this->cv_mat()); } // Specific OpenCV interaction (to make sure that they are backwards compatible) gcube::gcube(cv::Mat &cvMat) { this->d_pixels = NULL; this->create(cvMat); } void gcube::create(const cv::Mat &cvMat) { this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none); float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j); for (int k = 0; k < this->n_slices; k++) { h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f; } } } checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); free(h_pixels); } void gcube::create(const cv::Mat &cvMat, int x1, int x2, int y1, int y2) { assert(x1 <= x2 && y1 <= y2 && x2 <= cvMat.cols && y2 <= cvMat.rows); this->create(y2 - y1, x2 - x1, cvMat.channels(), gfill::none); float *h_pixels = new float[this->n_elem]; for (int i = y1; i < y2; i++) { for (int j = x1; j < x2; j++) { cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j); for (int k = 0; k < this->n_slices; k++) { h_pixels[IJK2C(i - y1, j - x1, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f; } } } checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); free(h_pixels); } cv::Mat gcube::cv_mat(void) { cv::Mat cv_image(this->n_rows, this->n_cols, CV_8UC3); float *h_pixels = new float[this->n_elem]; checkCudaErrors(hipMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { if (this->n_slices == 1) { cv_image.at<cv::Vec3b>(i, j) = 
cv::Vec3b((int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f), (int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f), (int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f)); } else if (this->n_slices == 3) { cv_image.at<cv::Vec3b>(i, j) = cv::Vec3b((int)(h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)] * 255.0f), (int)(h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)] * 255.0f), (int)(h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)] * 255.0f)); } } } free(h_pixels); return cv_image; } gcube &gcube::operator=(const cv::Mat &cvMat) { this->create(cvMat); return *this; }
a192d62d47557982005a5e988cf21bc034a345b0.cu
#include <opencv3/highgui/highgui.hpp> #include "gcube.h" #include "gpu_util.h" gcube::gcube(void) { this->d_pixels = NULL; this->create(0, 0, 0, gfill::none); } gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { this->d_pixels = NULL; this->create(n_rows, n_cols, n_slices, fill_type); } gcube::gcube(const gcube &gpucube) { this->d_pixels = NULL; this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none); checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice)); } gcube::gcube(const std::string &fname) { this->d_pixels = NULL; this->load(fname); } //gcube::gcube(const std::vector<float> list) { // this->d_pixels = NULL; // this->create(list); //} gcube::~gcube(void) { if (this->d_pixels) { checkCudaErrors(cudaFree(this->d_pixels)); } } __global__ void GPU_map_assign(float *F, float val, size_t n_elems) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_elems) { return; } F[idx] = val; } void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { if (this->d_pixels) { checkCudaErrors(cudaFree(d_pixels)); } this->n_rows = n_rows; this->n_cols = n_cols; this->n_slices = n_slices; this->n_elem = n_rows * n_cols * n_slices; if (this->n_elem == 0) { this->d_pixels = NULL; } else { checkCudaErrors(cudaMalloc(&this->d_pixels, this->n_elem * sizeof(float))); switch (fill_type) { case gfill::none: break; case gfill::zeros: checkCudaErrors(cudaMemset(this->d_pixels, 0, this->n_elem * sizeof(float))); break; case gfill::ones: GPU_map_assign<<<(this->n_elem-1) / 128 + 1, 128>>>(this->d_pixels, 1, this->n_elem); checkCudaErrors(cudaGetLastError()); break; default: break; } } } gcube &gcube::operator=(const gcube &gpucube) { this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none); checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice)); return *this; } 
void gcube::load(const std::string &fname) { this->create(cv::imread(fname)); } void gcube::save(const std::string &fname) { cv::imwrite(fname, this->cv_mat()); } // Specific OpenCV interaction (to make sure that they are backwards compatible) gcube::gcube(cv::Mat &cvMat) { this->d_pixels = NULL; this->create(cvMat); } void gcube::create(const cv::Mat &cvMat) { this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none); float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j); for (int k = 0; k < this->n_slices; k++) { h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f; } } } checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); free(h_pixels); } void gcube::create(const cv::Mat &cvMat, int x1, int x2, int y1, int y2) { assert(x1 <= x2 && y1 <= y2 && x2 <= cvMat.cols && y2 <= cvMat.rows); this->create(y2 - y1, x2 - x1, cvMat.channels(), gfill::none); float *h_pixels = new float[this->n_elem]; for (int i = y1; i < y2; i++) { for (int j = x1; j < x2; j++) { cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j); for (int k = 0; k < this->n_slices; k++) { h_pixels[IJK2C(i - y1, j - x1, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f; } } } checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); free(h_pixels); } cv::Mat gcube::cv_mat(void) { cv::Mat cv_image(this->n_rows, this->n_cols, CV_8UC3); float *h_pixels = new float[this->n_elem]; checkCudaErrors(cudaMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { if (this->n_slices == 1) { cv_image.at<cv::Vec3b>(i, j) = cv::Vec3b((int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f), (int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f), 
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f)); } else if (this->n_slices == 3) { cv_image.at<cv::Vec3b>(i, j) = cv::Vec3b((int)(h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)] * 255.0f), (int)(h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)] * 255.0f), (int)(h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)] * 255.0f)); } } } free(h_pixels); return cv_image; } gcube &gcube::operator=(const cv::Mat &cvMat) { this->create(cvMat); return *this; }
98e65975097a503d019fd901ebc7dd48dad386a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************************** calc_fits.c As the name implies, this routine calculates the fits to each data frame for the current set of model parameters. For example, for each delay-Doppler frame it calls routine posvis to create the model plane-of-sky image and then routine pos2deldop to create the model delay-Doppler image from this POS image. calc_fits also performs some of the screen and file output required by the "write" action; in particular, it carries out tasks that require information associated with plane-of-sky renderings, since such information is quickly overwritten if the "pos_scope" parameter is set to "global" (i.e., if all frames and lightcurve points share the same memory for their "pos" structures). Modified 2015 June 10 by CM: Implement smearing for the "fit" and "write" actions Modified 2014 February 14 by CM: Add "ilaw" argument to the apply_photo routine Modified 2013 July 28 by CM: For the "write" action, output ppm POS images when the "write_highlight" parameter is turned on Modified 2013 July 7 by CM: For the "write" action for lightcurve points and plane-of-sky frames, display the body-fixed longitude and latitude of the phase-angle bisector Modified 2013 June 25 by CM: Allow POS images written for optical data to be annotated with principal-axis shafts and the angular momentum vector For POS images (sky renderings), display the name of the image file and the maximum pixel value in the plot_surface routine (called by the write_pos routine) rather than here Modified 2013 April 24 by CM: Implement the "listpos_deldop" "listpos_opt" and "listpos_path" parameters Adjust names of output images so they are in alphanumeric order if > 100 per dataset Modified 2012 April 2 by CM: Correct instantaneous maximum breadth calculation for Doppler scaling factor Modified 2011 August 14 by CM: Display 
sidereal spin vector at each epoch, even for a PA rotator, if any spin impulses are used Modified 2010 September 1 by CM: Initialize variables to avoid compilation warnings Modified 2010 July 29 by CM: Fix bug introduced in calc_lghtcrv: rotation phases weren't being displayed for the "write" action For the "write" action for lightcurve datasets, include shadowed regions in projected area (and geometric albedo calculation) and display percentage of projected area that's shadowed Modified 2010 June 15 by CM: Revise arguments to pos2deldop and pos2doppler routines Modified 2010 May 28 by CM: Fix bug introduced with preceding change: in calc_lghtcrv, only deallocate memory for the "write" action (since it wasn't allocated in the first place for other actions) Modified 2010 May 24 by CM: For the "write" action for lightcurves, output the projected area and (for absolute photometry) geometric albedo Modified 2010 April 12 by CM: For the "write" action, include overflow region when computing cross sections Modified 2009 July 29 by CM: For the "write" action, fix bug: output ppm images rather than pgm images if the "plot_angmom" parameter is turned on For the "write" action, pass an argument to the "write_pos" routine explicitly telling it whether or not to produce a colored image Modified 2009 April 3 by CM: Initialize the "posbnd_logfactor" parameter and later set it for models that extend beyond the POS frame Add "badposet" and "badposet_logfactor" parameters: initialize them here and then use the new "checkposet" routine to adjust them for plane-of-sky fit images that are too small to "contain" the target Add "badradar" and "badradar_logfactor" parameters: initialize them here and then use the "pos2deldop" and "pos2doppler" routines (which are now int rather than void) to adjust them for models that are too wide in delay-Doppler space for the routines to handle Add "warn_badradar" argument to pos2deldop and pos2doppler routines For the "write" action, display each 
plane-of-sky fit frame's linear dimensions, the linear dimensions of the rectangular subset that contains the target, and the linear COM offsets Modified 2008 December 12 by CM: For the "write" action for NPA rotators, list Euler angles (giving the body-fixed axes' orientations in ecliptic coordinates) and spin vector components (in body-fixed coordinates) for each observation epoch For the "write" action for NPA rotators, ensure that maximum breadth is nonnegative Modified 2007 August 10 by CM: Eliminated unused variables and cleaned up a printf format For POS model frames (sky renderings) associated with lightcurve points and with plane-of-sky data frames, don't display the maximum pixel value unless the "optposmax" parameter is nonzero Modified 2007 August 4 by CM: Add comp matrix for POS frames Add orbit_offset and body arguments to posvis routine and remove facet argument Add orbit_xoff, orbit_yoff, orbit_dopoff and body parameters to pos2deldop and pos2doppler routines Add body argument to apply_photo routine Modified 2007 January 17 by CM: For the "write" action, display instantaneous folded zero-crossing bandwidth for Doppler and delay-Doppler frames Modified 2007 January 11 by CM: In calc_lghtcrv for the "write" action, count lightcurve points from 0 rather than 1, as is already done for lightcurve POS images (and for Doppler, delay-Doppler, and plane-of-sky frames) Modified 2007 January 6 by CM: In calc_lghtcrv for the "write" action, save rotation phase for each calculated lightcurve point so they can be output by routine chi2, and use cubic spline interpolation to obtain rotation phase at each observation epoch. 
Also display range of rotation phases if only one calculated point per lightcurve is displayed in full Modified 2006 October 1 by CM: In calc_lghtcrv, model lightcurve points are now intensities (relative to the solar intensity) rather than magnitudes In calc_lghtcrv and calc_poset, apply_photo routine has been revised to account for the POS pixel area and the 1 AU Sun-target distance Modified 2006 September 1 by CM and MCN: When "exclude_seen" parameter is used, add check that facet number pos->f[i][j] is nonnegative For the "write" action, don't display cross sections and albedos for uncalibrated (delay-)Doppler frames Modified 2006 June 21 by CM: In calc_deldop, changed delres to del_per_pixel and dopres to dop_per_pixel In calc_doppler, changed dopres to dop_per_bin For POS renderings and plane-of-sky fit frames, changed res to km_per_pixel Modified 2006 June 18 by CM: Allow each delay-Doppler frame within a dataset to have different dimensions after vignetting Allow each Doppler frame within a dataset to have different dimensions after vignetting Allow plane-of-sky frames to be rectangular rather than square, and no longer require an odd number of pixels per side Eliminate range datasets Modified 2006 March 10 by CM: Add "speckle" argument to pos2deldop and pos2doppler routines Modified 2005 October 6 by CM: For lightcurve datasets, replace SUNMAG constant by "sun_appmag" parameter, so that absolute photometry with filters other than V band can be used Modified 2005 July 25 by CM: For "write" action, display the model radar cross section and albedo for each delay-Doppler and Doppler frame Modified 2005 July 22 by CM: Created five separate routines for writing POS frames as images so that they can be called separately if the "mark_unseen" parameter is turned on for the "write" action (since in this case we must first process all datasets to see which model facets were "seen" and only then can write the POS images) Modified 2005 July 14 by CM: Fix bug in 
computing LE-to-COM delay and distance, LE-to-TE delay and distance, and instantantaneous bandwidth and breadth Modified 2005 July 13 by CM: For "write" action for lightcurve points and plane-of-sky frames, display the body-fixed longitude and latitude of the Sun-to-asteroid line Modified 2005 July 5 by CM: Remove the "dir" argument from pos2deldop and pos2doppler and add the "set" argument Modified 2005 July 3 by CM: For "write" action for lightcurve datasets, implement the "lcrv_writeall" parameter, which produces screen display for every model lightcurve point rather than just the one point which falls closest to the midpoint of the observations. Modified 2005 June 25 by CM: For "write" action for delay-Doppler frames, display the delay and distance between the leading edge and the center of mass and between the leading edge and the trailing edge; for delay-Doppler and Doppler frames, display the instantaneous zero-crossing bandwidth and maximum breadth. All of the above are obtained from the model's delay-Doppler limits as determined PRIOR to convolution with the delay and Doppler response functions. 
Modified 2005 June 22 by CM: Keep track of which model facets have been "seen" (i.e., are visible from Earth, are unshadowed, and have sufficiently low scattering and incidence angles) in at least one data frame or lightcurve point Modified 2005 April 23 by CM: For the "write" action, list whether or not epochs have been corrected for one-way light travel time Modified 2005 March 1 by CM: Adjust arguments to the revised "resampim" routine to permit rotation of resampled plane-of-sky frames Initialize the "posbnd" parameter (flag indicating that the model extends beyond the model POS frame) to 0 here rather than in bestfit.c so that it can used for actions other than "fit" Fix bug in calc_poset which was incorrectly flagging the model as being too small for the model POS frame Modified 2005 February 21 by CM: Use the new "poset_resample" parameter to allow interpolation methods other than bilinear for constructing plane-of-sky fit images for plane-of-sky data frames Add the new "image_rebin" argument to function resampim to handle plane-of-sky fit frames which have much coarser resolution than the model POS frames from which they are constructed (i.e., which are greatly undersampled) For "write" action, display maximum pixel value for model POS images for plane-of-sky frames and calculated lightcurve images (in case someone wants to use the "optposmax" parameter to truncate the image brightness) Modified 2005 February 6 by CM: For "write" action, display rotation phase For "write" action, fix bug in computing the angular body-fixed coordinates of the line of sight for lightcurve datasets Modified 2005 January 25 by CM: Take care of unused and uninitialized variables Modified 2005 January 24 by CM: Add "calc_poset" routine to handle POS datasets For "write" action, display the angular body-fixed coordinates of the line of sight For "write" action, display calendar dates in addition to Julian dates For "write" action, display the date for range datasets Modified 2004 
December 19 by CM: For "write" action, display the projected area for each Doppler and delay-Doppler frame Modified 2004 May 3 by CM: For "write" action, display the (delay-)Doppler corrections for each frame Modified 2004 April 9 by CM: For "write" action, display the solar azimuth angles (N->E in the POS) Modified 2004 March 27 by CM: Eliminate output of range (rng) plane-of-sky images for delay-Doppler frames For "write" action, display the epoch, solar phase angle and apparent spin vector direction at the midpoint of lightcurve datasets For "write" action, if "plot_spinvec" parameter is turned on, POS pgm images include an arrow indicating the target's intrinsic spin vector. For "write" action, if "plot_subradar" parameter is turned on, POS pgm images for (delay-)Doppler datasets include an X indicating the target's subradar point. For "write" action, if "plot_com" parameter is turned on, POS pgm images for (delay-)Doppler datasets include a cross indicating the target's projected COM. For "write" action, if "plot_pa" parameter vector has any component(s) turned on, POS ppm images for (delay-)Doppler datasets include colored cylindrical shaft(s) indicating the positive end of the corresponding principal axis/axes. 
Modified 2004 Feb 29 by CM: Add comments for lightcurves Remove "sdev" argument to routine gamma_trans Compute lightcurve magnitudes rather than negative magnitudes Eliminate the "curve_mm" lightcurve output file, since it nearly duplicates the "fit.mm" file (except that the cal factor isn't included) Move the lightcurve calculations to the new "calc_lghtcrv" routine Eliminate the unused dat argument to calc_deldop, calc_doppler, and calc_range Eliminate "type" argument to the "apply_photo" routine, and add the "phase" (solar phase angle) argument Label lightcurve POS images as 0 through (ncalc-1) rather than 1 through ncalc, similar to (delay-)Doppler pgm images Modified 2003 July 30 by CM: Add three parameters for rotating/flipping output pgm files for delay-Doppler images (fit, data, residuals) Modified 2003 May 16 by CM: Add listres parameter for producing output files containing residual matrices Modified 2003 May 13 by CM: Don't resample and recenter residual pgm images if dd_scaling = none Correct a bug in normalizing file output for Doppler fits Modified 2003 May 10 by CM: Add scalefitobs parameter so that user can choose whether to scale the data and fit pgm images separately (default), to the maximum value of the two taken together, to the maximum fit value, or to the maximum data value Modified 2003 May 7 by CM: Add sinc2width argument to pos2deldop and pos2doppler Modified 2003 April 29 by CM: Don't truncate residuals to integer values before making pgm images Add nsinc2 argument to pos2deldop and pos2doppler Modified 2003 April 28 by CM: Display two angles for the spin vector, not just one Modified 2003 April 24 by CM: Move "delcom" from delay-Doppler datasets to individual frames Modified 2003 April 23 by CM: Removed "deldopoffs" call from calc_deldop and "dopoffs" call from calc_deldop, since these calls are now included in realize_delcor *****************************************************************************************/ extern "C" { #include 
"head.h" }

/* Host drivers for the all-frames ("af") fit calculations; defined later in
 * this file.  Each processes dataset s of model component c. */
__host__ void calc_deldop_cuda_af(struct par_t *dpar, struct mod_t *dmod,
		struct dat_t *ddat, int s, int c);
__host__ void calc_doppler_cuda_af(struct par_t *dpar, struct mod_t *dmod,
		struct dat_t *ddat, int s, int c);
//__host__ void calc_poset_cuda( struct par_t *par, struct mod_t *mod, int s);
//__host__ void calc_lghtcrv_cuda(struct par_t *par, struct mod_t *mod, struct
//		lghtcrv_t *lghtcrv, int s);

/* Device-global scalars used to hand per-dataset values (frame count, view
 * count, epoch-view index, exclude_seen flag, dataset type) from
 * single-threaded "getter" kernels back to the host via
 * hipMemcpyFromSymbol. */
__device__ int cfaf_nframes, cfaf_nviews, cfaf_v0_index, cfaf_exclude_seen;
__device__ unsigned char cfaf_type;

/* Single-threaded kernel: clear the model-wide penalty flags and logfactors
 * in *dpar, and report component c's facet count and the number of datasets
 * through nf_nsets[0] and nf_nsets[1]. */
__global__ void cf_init_devpar_af_krnl(struct par_t *dpar, struct mod_t
		*dmod, struct dat_t *ddat, int c, int *nf_nsets) {
	/* Single-threaded kernel */
	if (threadIdx.x == 0) {
		dpar->posbnd = 0;
		dpar->badposet = 0;
		dpar->badradar = 0;
		dpar->posbnd_logfactor = 0.0;
		dpar->badposet_logfactor = 0.0;
		dpar->badradar_logfactor = 0.0;

		nf_nsets[0] = dmod->shape.comp[c].real.nf;
		nf_nsets[1] = ddat->nsets;
	}
}

/* nf-threaded kernel: mark every facet of component c as not yet seen.
 * nf_nsets[0] holds the facet count written by cf_init_devpar_af_krnl. */
__global__ void cf_init_seen_flags_af_krnl(struct mod_t *dmod, int c,
		int *nf_nsets) {
	/* nf-threaded kernel */
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	if (f < nf_nsets[0])
		dmod->shape.comp[c].real.f[f].seen = 0;
}

/* Single-threaded kernel: publish dataset s's type in cfaf_type so the host
 * can dispatch on it. */
__global__ void cf_get_set_type_af_krnl(struct dat_t *ddat, int s) {
	/* Single-threaded kernel */
	if (threadIdx.x == 0)
		cfaf_type = ddat->set[s].type;
}

//__global__ void cf_set_final_pars_af_krnl(struct par_t *dpar, struct
//		dat_t *ddat) {
//	/* Single-threaded kernel */
//	if (threadIdx.x == 0) {
//		dpar->posbnd_logfactor /= ddat->dof;
//		dpar->badposet_logfactor /= ddat->dof_poset;
//		dpar->badradar_logfactor /= (ddat->dof_deldop + ddat->dof_doppler);
//	}
//}

/* Top-level driver: reset the penalty flags, clear the per-facet "seen"
 * markers, then compute the fit for every dataset in turn, dispatching on
 * dataset type.  Single-component models only for now (c = 0). */
__host__ void calc_fits_cuda_af(struct par_t *dpar, struct mod_t *dmod,
		struct dat_t *ddat)
{
	int s, *nf_nsets, c=0;
	unsigned char type;
	dim3 BLK,THD;
	/* NOTE(review): nf_nsets is read directly on the host after the sync
	 * below, so cudaCalloc presumably returns managed/mapped memory --
	 * confirm against its definition. */
	cudaCalloc((void**)&nf_nsets, sizeof(int), 2);

	/* Initialize flags that indicate the model extends beyond POS frame, that
	 * plane-of-sky fit images are too small to "contain" the target, and that
	 * model is too wide in (delay-)Doppler space to create (delay-)Doppler fit
	 * frames. Note that this also gets mod->shape.nf and nsets */
	hipLaunchKernelGGL(( cf_init_devpar_af_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod, ddat, c, nf_nsets);
	checkErrorAfterKernelLaunch("cf_init_devpar_af_krnl");
	deviceSyncAfterKernelLaunch("cf_init_devpar_af_krn");

	/* Initialize the flags that indicate whether or not each facet of each
	 * model component is ever visible and unshadowed from Earth
	 * Note: Single component only for now.  */
	//for (c=0; c<mod->shape.ncomp; c++)
	BLK.x = floor((maxThreadsPerBlock - 1 + nf_nsets[0])/maxThreadsPerBlock);
	THD.x = maxThreadsPerBlock;
	hipLaunchKernelGGL(( cf_init_seen_flags_af_krnl), dim3(BLK),dim3(THD), 0, 0, dmod, c, nf_nsets);
	checkErrorAfterKernelLaunch("cf_init_seen_flags_af_krnl");
	deviceSyncAfterKernelLaunch("cf_init_seen_flags_af_krnl");

	/* Calculate the fits for each dataset in turn - use multi-GPU later */
	for (s=0; s<nf_nsets[1]; s++) {
		/* Get data type */
		hipLaunchKernelGGL(( cf_get_set_type_af_krnl), dim3(1),dim3(1), 0, 0, ddat, s);
		checkErrorAfterKernelLaunch("cf_init_seen_flags_krnl (calc_fits_cuda)");
		gpuErrchk(hipMemcpyFromSymbol(&type, cfaf_type, sizeof(unsigned char),
				0, hipMemcpyDeviceToHost));

		switch (type) {
		case DELAY:
			calc_deldop_cuda_af(dpar, dmod, ddat, s, c);
			break;
		case DOPPLER:
			calc_doppler_cuda_af(dpar, dmod, ddat, s, c);
			break;
		case POS:
			printf("Write calc_poset_cuda!");
			//			calc_poset_cuda(dpar, dmod, s);
			break;
		case LGHTCRV:
			printf("Write calc_lghtcrv_cuda!");
			//			calc_lghtcrv_cuda(dpar, dmod, s);
			break;
		default:
			printf("calc_fits_cuda.c: can't handle this type yet\n");
		}
	}

	/* Complete calculations of values that will be used during a fit to
	 * increase the objective function for models with bad properties */
	/* NOTE(review): this launches cf_set_final_pars_krnl (no "_af" suffix,
	 * presumably defined in another translation unit); the "_af" variant
	 * above is commented out -- verify the intended kernel is launched. */
	hipLaunchKernelGGL(( cf_set_final_pars_krnl), dim3(1),dim3(1), 0, 0, dpar, ddat);
	checkErrorAfterKernelLaunch("cf_set_final_pars_af_krnl");
}

/* Single-threaded kernel: publish dataset s's frame count (ncalc for
 * lightcurves) in cfaf_nframes. */
__global__ void cf_get_frames_af_krnl(struct dat_t *ddat, int s) {
	/* Single-threaded kernel */
	if (threadIdx.x == 0) {
		switch(ddat->set[s].type) {
		case DELAY:
			cfaf_nframes = ddat->set[s].desc.deldop.nframes;
			break;
		case DOPPLER:
			cfaf_nframes = ddat->set[s].desc.doppler.nframes;
			break;
		case POS:
			cfaf_nframes = ddat->set[s].desc.poset.nframes;
			break;
		case LGHTCRV:
			cfaf_nframes = ddat->set[s].desc.lghtcrv.ncalc;
			break;
		}
	}
}

/* nframes-threaded kernel: cache per-frame shortcut pointers (frame, epoch
 * view, POS) and bin counts for a delay-Doppler dataset; thread 0 also
 * publishes nviews/v0 and zeroes the five smearing overflow accumulators. */
__global__ void cf_set_shortcuts_deldop_af_krnl( struct dat_t *ddat,
		struct deldopfrm_t **frame, struct deldopview_t **view0,
		struct pos_t **pos, float *overflow, int *ndel, int *ndop,
		int s, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm < nframes) {
		if (threadIdx.x==0) {
			cfaf_nviews = ddat->set[s].desc.deldop.nviews;
			cfaf_v0_index = ddat->set[s].desc.deldop.v0;
			overflow[0] = 0.0; // cf_overflow_o2_store = 0.0;
			overflow[1] = 0.0; // cf_overflow_m2_store = 0.0;
			overflow[2] = 0.0; // cf_overflow_xsec_store = 0.0;
			overflow[3] = 0.0; // cf_overflow_dopmean_store = 0.0;
			overflow[4] = 0.0; // cf_overflow_delmean_store = 0.0;
		}
		frame[frm] = &ddat->set[s].desc.deldop.frame[frm];
		ndop[frm] = frame[frm]->ndop;
		ndel[frm] = frame[frm]->ndel;
		view0[frm] = &frame[frm]->view[ddat->set[s].desc.deldop.v0];
		pos[frm] = &frame[frm]->pos;
	}
}

/* nframes-threaded kernel: Doppler twin of the kernel above (no delay
 * dimension, only four overflow accumulators). */
__global__ void cf_set_shortcuts_doppler_af_krnl(struct dat_t *ddat, int s,
		int nframes, struct dopfrm_t **frame, int *ndop,
		struct dopview_t **view0, struct pos_t **pos, float *overflow,
		int4 *xylim) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm < nframes) {
		if (threadIdx.x == 0) {
			cfaf_nviews = ddat->set[s].desc.doppler.nviews;
			cfaf_v0_index = ddat->set[s].desc.doppler.v0;
			overflow[0] = 0.0; // cf_overflow_o2_store = 0.0;
			overflow[1] = 0.0; // cf_overflow_m2_store = 0.0;
			overflow[2] = 0.0; // cf_overflow_xsec_store = 0.0;
			overflow[3] = 0.0; // cf_overflow_dopmean_store = 0.0;
		}
		frame[frm] = &ddat->set[s].desc.doppler.frame[frm];
		view0[frm] = &frame[frm]->view[ddat->set[s].desc.doppler.v0];
		ndop[frm] = frame[frm]->ndop;
		pos[frm] = &frame[frm]->pos;
	}
}

__global__ void
/* (nframes blocks x 9 threads): copy the 3x3 view-v ae and oe rotation
 * matrices into each frame's POS structure; thread 0 of each block also
 * clears the bistatic flag and records the POS half-width n in pos_n. */
cf_set_pos_ae_deldop_af_krnl(struct pos_t **pos, struct deldopfrm_t **frame,
		int *pos_n, int nframes, int v) {
	/* nframes*9-threaded kernel */
	int offset = threadIdx.x;
	int i = offset % 3;
	int j = offset / 3;
	int frm = blockIdx.x;

	if ((offset < 9) && (frm < nframes)) {
		pos[frm]->ae[i][j] = frame[frm]->view[v].ae[i][j];
		pos[frm]->oe[i][j] = frame[frm]->view[v].oe[i][j];

		/* Single-thread task */
		if (offset == 0) {
			pos[frm]->bistatic = 0;
			pos_n[frm] = pos[frm]->n;
		}
	}
}

/* Doppler twin of the kernel above. */
__global__ void cf_set_pos_ae_doppler_af_krnl(struct pos_t **pos,
		struct dopfrm_t **frame, int *pos_n, int nframes, int v) {
	/* nframes*9-threaded kernel */
	int offset = threadIdx.x;
	int i = offset % 3;
	int j = offset / 3;
	int frm = blockIdx.x;

	if ((offset < 9) && (frm < nframes)) {
		pos[frm]->ae[i][j] = frame[frm]->view[v].ae[i][j];
		pos[frm]->oe[i][j] = frame[frm]->view[v].oe[i][j];

		/* frm-level-thread task */
		if (offset == 0) {
			pos[frm]->bistatic = 0;
			pos_n[frm] = pos[frm]->n;
		}
	}
}

/* Reset every pixel of every frame's POS image to its "empty" state before
 * a new view is rendered. */
__global__ void cf_posclr_af_krnl(struct pos_t **pos, int n, int nx,
		int frame_size, int nframes)
{
	/* (nframes * npixels)-threaded kernel where npixels is the number of pixels
	 * in the full POS image, so (2*pos->n + 1)^2 */
	int total_offset = blockIdx.x * blockDim.x + threadIdx.x;
	int frm = total_offset / frame_size;
	int offset = total_offset % frame_size;	// local offset within one frame
	int i = (offset % nx) - n;	// POS column index, -n..n
	int j = (offset / nx) - n;	// POS row index, -n..n

	if ((offset < frame_size) && (total_offset < nframes*frame_size) &&
			(frm < nframes)) {
		/* For each POS pixel, zero out the optical brightness (b) and
		 * cos(scattering angle), reset the z coordinate (distance from COM towards
		 * Earth) to a dummy value, and reset the body, component, and facet onto
		 * which the pixel center projects to dummy values */
		pos[frm]->body[i][j] = pos[frm]->comp[i][j] = pos[frm]->f[i][j] = -1;
		pos[frm]->b_s[offset] = pos[frm]->cose_s[offset] = 0.0;
		pos[frm]->z_s[offset] = -HUGENUMBER;

		/* In the x direction, reset the model's leftmost and rightmost
		 * pixel number to dummy values, and similarly for the y direction.
		 * NOTE(review): every pixel thread of a frame writes the same four
		 * xlim/ylim values -- redundant but benign since the value is
		 * uniform. */
		pos[frm]->xlim[0] = pos[frm]->ylim[0] = n;
		pos[frm]->xlim[1] = pos[frm]->ylim[1] = -n;

		/* For a bistatic situation (lightcurve or plane-of-sky dataset), zero out
		 * cos(incidence angle) and reset the distance towards the sun, the body,
		 * component, and facet numbers as viewed from the sun, and the model's
		 * maximum projected extent as viewed from the sun to dummy values */
		if (pos[frm]->bistatic) {
			pos[frm]->bodyill[i][j] = pos[frm]->compill[i][j] =
					pos[frm]->fill[i][j] = -1;
			pos[frm]->cosill_s[offset] = 0.0;
			pos[frm]->zill_s[offset] = 0.0;

			pos[frm]->xlim2[0] = pos[frm]->ylim2[0] = n;
			pos[frm]->xlim2[1] = pos[frm]->ylim2[1] = -n;
		}
	}
}

/* nframes-threaded kernel: flag that the model extends beyond the POS frame
 * and accumulate the dof-weighted log-penalty for a delay-Doppler set.
 * NOTE(review): the += on dpar->posbnd_logfactor is performed by every
 * thread without an atomic -- updates from different frames can be lost;
 * consider atomicAdd. */
__global__ void cf_set_posbnd_deldop_af_krnl(struct par_t *dpar,
		struct deldopfrm_t **frame, struct pos_t **pos, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm<nframes) {
		if (frm==0)
			dpar->posbnd = 1;
		dpar->posbnd_logfactor += frame[frm]->dof * pos[frm]->posbnd_logfactor;
	}
}

/* Doppler twin of the kernel above (same non-atomic += caveat). */
__global__ void cf_set_posbnd_doppler_af_krnl(struct par_t *dpar,
		struct dopfrm_t **frame, struct pos_t **pos, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm<nframes) {
		if (frm==0)
			dpar->posbnd = 1;
		dpar->posbnd_logfactor += frame[frm]->dof * pos[frm]->posbnd_logfactor;
	}
}

/* nframes-threaded kernel: publish dpar->exclude_seen and copy each frame's
 * POS x/y limits into xylim[frm] (w=xlim0, x=xlim1, y=ylim0, z=ylim1). */
__global__ void cf_get_exclude_seen_af_krnl(struct par_t *dpar,
		struct pos_t **pos, int4 *xylim, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm < nframes) {
		if (threadIdx.x == 0)
			cfaf_exclude_seen = dpar->exclude_seen;
		xylim[frm].w = pos[frm]->xlim[0];
		xylim[frm].x = pos[frm]->xlim[1];
		xylim[frm].y = pos[frm]->ylim[0];
		xylim[frm].z = pos[frm]->ylim[1];
	}
}

/* nframes-threaded kernel: reduce per-frame POS limits to a single global
 * bounding box in global_lim[0..3]. */
__global__ void cf_get_global_frmsz_krnl(int *global_lim, int4 *xylim,
		int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm < nframes) {
		/* Initialize global_lim.
		 * NOTE(review): every thread re-zeroes global_lim while sibling
		 * threads may already be issuing the atomicMin/Max updates below --
		 * a data race (no barrier between init and reduction).  Also,
		 * starting the mins at 0 clamps any all-positive minimum to 0.
		 * Consider initializing from thread 0 only, then __syncthreads(). */
		for (int i=0; i<4; i++)
			global_lim[i] = 0;

		/* Now calculate minimum for all frames */
atomicMin(&global_lim[0], xylim[frm].w); atomicMax(&global_lim[1], xylim[frm].x); atomicMin(&global_lim[2], xylim[frm].y); atomicMax(&global_lim[3], xylim[frm].z); } } __global__ void cf_mark_pixels_seen_af_krnl( struct par_t *dpar, struct mod_t *dmod, struct pos_t **pos, int *global_lim, int frame_size, int xspan, int nframes, int c) { /* nframes*npixels-threaded kernel */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; int k = (offset % xspan) + global_lim[0]; // cf_xlim0; int l = (offset / xspan) + global_lim[2]; // cf_ylim0; int facetnum; c = 0; if ((offset < frame_size) && (frm < nframes)) { if ((pos[frm]->cose_s[offset] > dpar->mincosine_seen) && (pos[frm]->f[k][l] >= 0)) { facetnum = pos[frm]->f[k][l]; //c = cf_pos->comp[k][l]; dmod->shape.comp[c].real.f[facetnum].seen = 1; } } } __global__ void cf_set_badradar_deldop_af_krnl( struct par_t *dpar, struct dat_t *ddat, struct deldopfrm_t **frame, int s, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { if (threadIdx.x == 0) dpar->badradar = 1; dpar->badradar_logfactor += frame[frm]->dof * frame[frm]->badradar_logfactor / ddat->set[s].desc.deldop.nviews; } } __global__ void cf_set_badradar_doppler_af_krnl( struct par_t *dpar, struct dat_t *ddat, struct dopfrm_t **frame, int s, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { if (threadIdx.x == 0) dpar->badradar = 1; dpar->badradar_logfactor +=frame[frm]->dof * frame[frm]->badradar_logfactor / ddat->set[s].desc.doppler.nviews; } } __global__ void cf_add_fit_store_af_krnl1( struct dat_t *ddat, float **fit_store, int frame_size, int s, int nframes) { /* (nframes*ndel*ndop)-threaded kernel */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; if ((offset < frame_size) && (frm < nframes)) { switch (cfaf_type) { case 
DELAY: fit_store[frm][offset] += ddat->set[s].desc.deldop.frame[frm].fit_s[offset]; break; case DOPPLER: fit_store[frm][offset] += ddat->set[s].desc.doppler.frame[frm].fit_s[offset]; break; } } } __global__ void cf_add_fit_store_deldop_af_krnl2( struct deldopfrm_t **frame, float *overflow, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] - overflow_o2_store * overflow[1] - overflow_m2_store * overflow[2] - overflow_xsec_store * overflow[3] - overflow_dopmean_store * overflow[4] - overflow_delmean_store */ atomicAdd(&overflow[0], (float)frame[frm]->overflow_o2); atomicAdd(&overflow[1], (float)frame[frm]->overflow_m2); atomicAdd(&overflow[2], (float)frame[frm]->overflow_xsec); atomicAdd(&overflow[3], (float)frame[frm]->overflow_delmean); atomicAdd(&overflow[4], (float)frame[frm]->overflow_dopmean); } } __global__ void cf_add_fit_store_doppler_af_krnl2( struct dopfrm_t **frame, float *overflow, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] - overflow_o2_store * overflow[1] - overflow_m2_store * overflow[2] - overflow_xsec_store * overflow[3] - overflow_dopmean_store */ atomicAdd(&overflow[0], (float)frame[frm]->overflow_o2); atomicAdd(&overflow[1], (float)frame[frm]->overflow_m2); atomicAdd(&overflow[2], (float)frame[frm]->overflow_xsec); atomicAdd(&overflow[3], (float)frame[frm]->overflow_dopmean); } } __global__ void cf_finish_fit_store_af_krnl( struct dat_t *ddat, float **fit_store, int s, int nThreads, int frame_size) { /* (nframes*ndel*ndop)-threaded kernel for Delay Doppler, * (nframes*ndop)-threaded kernel for Doppler */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; if (offset < nThreads) switch (cfaf_type) { case DELAY: ddat->set[s].desc.deldop.frame[frm].fit_s[offset] = fit_store[frm][offset]; break; case DOPPLER: 
ddat->set[s].desc.doppler.frame[frm].fit_s[offset] = fit_store[frm][offset]; break; } } __global__ void cf_finish_fit_deldop_af_krnl2( struct deldopfrm_t **frame, float *overflow, int nframes) { /* nframes-threaded Kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] = overflow_o2_store * overflow[1] = overflow_m2_store * overflow[2] = overflow_xsec_store * overflow[3] = overflow_dopmean_store * overflow[4] = overflow_delmean_store */ frame[frm]->overflow_o2 = overflow[0] / cfaf_nviews; frame[frm]->overflow_m2 = overflow[1] / cfaf_nviews; frame[frm]->overflow_xsec = overflow[2] / cfaf_nviews; frame[frm]->overflow_dopmean = overflow[3] / cfaf_nviews; frame[frm]->overflow_delmean = overflow[4] / cfaf_nviews; } } __global__ void cf_finish_fit_doppler_af_krnl2( struct dopfrm_t **frame, float *overflow, int nframes) { /* nframes-threaded Kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] = overflow_o2_store * overflow[1] = overflow_m2_store * overflow[2] = overflow_xsec_store * overflow[3] = overflow_dopmean_store */ frame[frm]->overflow_o2 = overflow[0] / cfaf_nviews; frame[frm]->overflow_m2 = overflow[1] / cfaf_nviews; frame[frm]->overflow_xsec = overflow[2] / cfaf_nviews; frame[frm]->overflow_dopmean = overflow[3] / cfaf_nviews; } } __global__ void cf_gamma_trans_deldop_af_krnl( struct par_t *dpar, struct dat_t *ddat, int s, int nframes, int frame_size) { /* Multi-threaded kernel */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; /* Each thread uses this value, so put it in shared memory */ __shared__ float dd_gamma; dd_gamma = (float)dpar->dd_gamma; if ((offset < frame_size) && (frm < nframes) && (dd_gamma != 0)) { /* Carry out a gamma transformation on the fit image if requested */ dev_gamma_trans_float(&ddat->set[s].desc.deldop.frame[frm].fit_s[offset], dd_gamma); } } __host__ void calc_deldop_cuda_af(struct par_t *dpar, struct mod_t *dmod, 
		struct dat_t *ddat, int s, int c) {
	float orbit_offset[3] = {0.0, 0.0, 0.0};
	int nframes, nThreads, nviews, v0_index, nx, exclude_seen, v, v2,
			xspan, yspan, frmsz;
	int *ndel, *ndop, *global_lim, *pos_n;
	int4 *xylim;
	float *overflow, **fit_store;
	struct deldopfrm_t **frame;
	struct deldopview_t **view0;
	struct pos_t **pos;
	dim3 BLK,THD;

	/* Get # of frames for this deldop */
	hipLaunchKernelGGL(( cf_get_frames_af_krnl), dim3(1),dim3(1), 0, 0, ddat, s);
	checkErrorAfterKernelLaunch("cf_get_nframes_af_krnl");
	gpuErrchk(hipMemcpyFromSymbol(&nframes, cfaf_nframes, sizeof(int),
			0, hipMemcpyDeviceToHost));

	/* Allocate memory.
	 * NOTE(review): global_lim is indexed [0..3] by
	 * cf_get_global_frmsz_krnl, but here it is sized by nframes -- this
	 * under-allocates whenever nframes < 4.  It should be 4 ints. */
	cudaCalloc((void**)&frame, sizeof(struct deldopfrm_t*), nframes);
	cudaCalloc((void**)&view0, sizeof(struct deldopview_t*),nframes);
	cudaCalloc((void**)&pos, sizeof(struct pos_t*), nframes);
	cudaCalloc((void**)&overflow, sizeof(float), 5);
	cudaCalloc((void**)&ndel, sizeof(int), nframes);
	cudaCalloc((void**)&ndop, sizeof(int), nframes);
	cudaCalloc((void**)&pos_n, sizeof(int), nframes);
	cudaCalloc((void**)&global_lim, sizeof(int), nframes);
	cudaCalloc((void**)&xylim, sizeof(int4), nframes);

	// for (f=0; f<nframes; f++) {
	/* Set frame, view0, and pos */
	THD.x = nframes;
	hipLaunchKernelGGL(( cf_set_shortcuts_deldop_af_krnl), dim3(1),dim3(THD), 0, 0, ddat,
			frame, view0, pos, overflow, ndel, ndop, s, nframes);
	checkErrorAfterKernelLaunch("cf_set_shortcuts_deldop_af_krnl");
	deviceSyncAfterKernelLaunch("cf_set_shortcuts_deldop_af_krnl");
	gpuErrchk(hipMemcpyFromSymbol(&nviews, cfaf_nviews, sizeof(int),
			0, hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpyFromSymbol(&v0_index, cfaf_v0_index, sizeof(int),
			0, hipMemcpyDeviceToHost));

	/* Calculate size of each frame's fit array. This assumes all frames have
	 * the same number of doppler and delay bins. */
	frmsz = ndel[0]*ndop[0];

	/* If smearing is being modeled, initialize variables that
	 * will be used to sum results calculated for individual views. */
	if (nviews > 1) {
		/* Allocate fit_store which is a double-pointer in the af version as
		 * each frame needs its own fit_store array */
		cudaCalloc((void**)&fit_store, sizeof(float*), nframes);
		for (int i=0; i<nframes; i++)
			cudaCalloc((void**)&fit_store[i], sizeof(float), frmsz);
	}

	/* Loop over all views for this (smeared) frame, going in an order that
	   ends with the view corresponding to the epoch listed for this frame
	   in the obs file; this way we can use the calculated information for
	   that view in the "write" action screen and disk output that follows */
	for (v2=v0_index+1; v2<=v0_index+nviews; v2++) {
		v = v2 % nviews;

		/* Launch 9*nframes-threaded kernel to set pos->ae,pos->oe,pos->bistatic.*/
		THD.x = 9; BLK.x = nframes;
		hipLaunchKernelGGL(( cf_set_pos_ae_deldop_af_krnl), dim3(BLK),dim3(THD), 0, 0, pos,
				frame, pos_n, nframes, v);
		checkErrorAfterKernelLaunch("cf_set_pos_ae_deldop_af_krnl");
		deviceSyncAfterKernelLaunch("cf_set_pos_ae_deldop_af_krnl");

		/* Configure & launch posclr_krnl to initialize POS view */
		nx = 2*pos_n[0]+1;	// full POS width; assumes all frames share n
		nThreads = nframes * nx * nx;
		BLK.x = floor((maxThreadsPerBlock - 1 + nThreads) / maxThreadsPerBlock);
		THD.x = maxThreadsPerBlock; // Thread block dimensions
		hipLaunchKernelGGL(( cf_posclr_af_krnl), dim3(BLK),dim3(THD), 0, 0, pos,
				pos_n[0], nx, (nx*nx), nframes);
		checkErrorAfterKernelLaunch("cf_posclr_af_krnl");

		/* Call posvis_cuda_2 to get facet number, scattering angle,
		 * distance toward Earth at center of each POS pixel; set flag
		 * posbnd if any model portion extends beyond POS frame limits.*/
		/* NOTE: Limited to single component for now */
		if (posvis_af(dpar,dmod,ddat,orbit_offset,s,nframes,0,0,0) &&
				v == v0_index) {
			/* Call single-threaded kernel to set dpar->posbnd and
			 * dpar->posbnd_logfactor */
			THD.x = nframes;
			hipLaunchKernelGGL(( cf_set_posbnd_deldop_af_krnl), dim3(BLK),dim3(THD), 0, 0, dpar,frame,pos,nframes);
			checkErrorAfterKernelLaunch("cf_set_posbnd_deldop_af_krnl");
		}

		/* Launch nframes-threaded kernel to get dpar->exclude_seen */
		THD.x = nframes;
		hipLaunchKernelGGL(( cf_get_exclude_seen_af_krnl), dim3(1),dim3(THD), 0, 0, dpar,pos,xylim,nframes);
		checkErrorAfterKernelLaunch("cf_get_exclude_seen_af_krnl");
		gpuErrchk(hipMemcpyFromSymbol(&exclude_seen, cfaf_exclude_seen, sizeof(int),
				0, hipMemcpyDeviceToHost));

		/* Get the largest pos->xlim and ylim values for all frames */
		hipLaunchKernelGGL(( cf_get_global_frmsz_krnl), dim3(1),dim3(THD), 0, 0, global_lim, xylim, nframes);
		checkErrorAfterKernelLaunch("cf_get_global_frmsz_krnl");
		deviceSyncAfterKernelLaunch("cf_get_global_frmsz_krnl");

		/* Go through all POS pixels which are visible with low enough
		 * scattering angle and mark the facets which project onto their
		 * centers as having been "seen" at least once */
		if (s != exclude_seen && v == v0_index) {
			xspan = global_lim[1] - global_lim[0] + 1; // xlim1 - xlim0 + 1;
			yspan = global_lim[3] - global_lim[2] + 1; // ylim1 - ylim0 + 1;
			nThreads = nframes * xspan * yspan;

			/* Configure & launch posclr_af_krnl to initialize POS view */
			BLK.x = floor((maxThreadsPerBlock - 1 + nThreads) / maxThreadsPerBlock);
			THD.x = maxThreadsPerBlock; // Thread block dimensions
			hipLaunchKernelGGL(( cf_mark_pixels_seen_af_krnl), dim3(BLK),dim3(THD), 0, 0, dpar,
					dmod, pos, global_lim, (xspan*yspan), xspan, nframes, c);
			checkErrorAfterKernelLaunch("cf_mark_pixels_seen_af_krnl");
		}

		/* Zero out the fit delay-Doppler image, then call pos2deldop to
		 * create the fit image by mapping power from the plane of the sky
		 * to delay-Doppler space. */
		deviceSyncAfterKernelLaunch("pre-clrvect_krnl sync in calc_fits_cuda_af.cu");
		nThreads = frmsz * nframes;
		BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock);
		THD.x = maxThreadsPerBlock; // Thread block dimensions
		hipLaunchKernelGGL(( clrvect_af_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, s, nframes, nThreads, frmsz);
		checkErrorAfterKernelLaunch("clrvect_af_krnl, calc_fits_cuda");

		if (pos2deldop_cuda_af(dpar,dmod,ddat,0.0,0.0,0.0,0, s,nframes,v)) {
			/* Call single-threaded kernel to set badradar flag and
			 * associated badradar_logfactor */
			THD.x = nframes;
			hipLaunchKernelGGL(( cf_set_badradar_deldop_af_krnl), dim3(1),dim3(THD), 0, 0, dpar,
					ddat, frame, s, nframes);
			checkErrorAfterKernelLaunch("cf_set_badradar_deldop_af_krnl");
		}

		/* If smearing is being modeled, include delay-Doppler calculations
		 * from this view in the summed results for this frame  */
		if (nviews > 1) {
			/* Launch ndel*ndop-threaded kernel to add fit[i][j] to
			 * fit_store[i][j]*/
			/* frmsz and nThreads are still accurate from the clrvect call */
			BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock);
			THD.x = maxThreadsPerBlock;
			hipLaunchKernelGGL(( cf_add_fit_store_af_krnl1), dim3(BLK),dim3(THD), 0, 0, ddat,fit_store,frmsz,s,nframes);
			checkErrorAfterKernelLaunch("cf_add_fit_store_af_krnl1");

			THD.x = nframes;
			hipLaunchKernelGGL(( cf_add_fit_store_deldop_af_krnl2), dim3(1),dim3(THD), 0, 0, frame,overflow,nframes);
			checkErrorAfterKernelLaunch("cf_add_fit_store_deldop_af_krnl2");
		}
	} /* end views loop */

	/* If smearing is being modeled, compute mean values over all views for
	 * this frame and store them in the standard frame structure */
	/* This kernel also carries out the gamma transformation on the fit
	 * image if the par->dd_gamma flag is not set  */
	if (nviews > 1) {
		/* Launch (nframes*ndel*ndop)-threaded kernel */
		nThreads = frmsz*nframes;
		BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock);
		THD.x = maxThreadsPerBlock;
		hipLaunchKernelGGL(( cf_finish_fit_store_af_krnl), dim3(BLK),dim3(THD), 0, 0, ddat,fit_store,s,nThreads,frmsz);
		checkErrorAfterKernelLaunch("cf_finish_fit_af_store");

		THD.x = nframes;
		hipLaunchKernelGGL(( cf_finish_fit_deldop_af_krnl2), dim3(1),dim3(THD), 0, 0, frame,overflow,nframes);
		checkErrorAfterKernelLaunch("cf_finish_fit_deldop_af_krnl2");

		THD.x = maxThreadsPerBlock;
		hipLaunchKernelGGL(( cf_gamma_trans_deldop_af_krnl), dim3(BLK),dim3(THD), 0, 0, dpar,
				ddat, s, nframes, frmsz);
		checkErrorAfterKernelLaunch("cf_gamma_trans_krnl");
		/* NOTE(review): only the array of pointers is freed here; the
		 * per-frame fit_store[i] buffers allocated above are leaked. */
		hipFree(fit_store);
	}
	//}  /* end loop over frames */

	/* De-allocate memory */
	hipFree(frame);
	hipFree(view0);
	hipFree(pos);
	hipFree(overflow);
	hipFree(ndel);
	hipFree(ndop);
	hipFree(pos_n);
	hipFree(global_lim);
	hipFree(xylim);
}

/* Compute the fit spectra for Doppler dataset s of component c; same
 * structure as calc_deldop_cuda_af but with one-dimensional (ndop) fits. */
__host__ void calc_doppler_cuda_af(struct par_t *dpar, struct mod_t *dmod,
		struct dat_t *ddat, int s, int c)
{
	float orbit_offset[3] = {0.0, 0.0, 0.0};
	float **fit_store, *overflow;
	int *ndop, *pos_n, *global_lim, v0_index, frmsz, nThreads, exclude_seen,
			nviews, nframes, nx, f, v, v2, xspan, yspan;
	dim3 BLK,THD;
	struct dopfrm_t **frame;
	struct dopview_t **view0;
	struct pos_t **pos;
	int4 *xylim;

	/* Get # of frames for this deldop */
	hipLaunchKernelGGL(( cf_get_frames_af_krnl), dim3(1),dim3(1), 0, 0, ddat, s);
	checkErrorAfterKernelLaunch("cf_get_nframes_krnl (calc_deldop_cuda)");
	gpuErrchk(hipMemcpyFromSymbol(&nframes, cfaf_nframes, sizeof(int),
			0, hipMemcpyDeviceToHost));

	/* Allocate memory.
	 * NOTE(review): as in calc_deldop_cuda_af, global_lim is indexed
	 * [0..3] but sized by nframes -- under-allocated when nframes < 4. */
	cudaCalloc((void**)&frame, sizeof(struct dopfrm_t*), nframes);
	cudaCalloc((void**)&view0, sizeof(struct dopview_t*),nframes);
	cudaCalloc((void**)&pos, sizeof(struct pos_t*), nframes);
	cudaCalloc((void**)&overflow, sizeof(float), 4);
	cudaCalloc((void**)&ndop, sizeof(int), nframes);
	cudaCalloc((void**)&pos_n, sizeof(int), nframes);
	cudaCalloc((void**)&global_lim, sizeof(int), nframes);
	cudaCalloc((void**)&xylim, sizeof(int4), nframes);

	// for (f=0; f<nframes; f++) {
	/* Set frame, view0, and pos */
	THD.x = nframes;
	hipLaunchKernelGGL((
cf_set_shortcuts_doppler_af_krnl), dim3(1),dim3(THD), 0, 0, ddat, s,
			nframes, frame, ndop, view0, pos, overflow, xylim);
	checkErrorAfterKernelLaunch("cf_set_shortcuts_doppler_af_krnl");
	deviceSyncAfterKernelLaunch("cf_set_shortcuts_doppler_af_krnl");
	gpuErrchk(hipMemcpyFromSymbol(&nviews, cfaf_nviews, sizeof(int),
			0, hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpyFromSymbol(&v0_index, cfaf_v0_index, sizeof(int),
			0, hipMemcpyDeviceToHost));

	/* Calculate size of each frame's fit array. This assumes all frames have
	 * the same number of doppler. */
	frmsz = ndop[0];

	/* If smearing is being modeled, initialize variables that
	 * will be used to sum results calculated for individual views. */
	if (nviews > 1) {
		/* Allocate fit_store which is a double-pointer in the af version as
		 * each frame needs its own fit_store array */
		cudaCalloc((void**)&fit_store, sizeof(float*), nframes);
		for (int i=0; i<nframes; i++)
			cudaCalloc((void**)&fit_store[i], sizeof(float), frmsz);
	}

	/* Loop over all views for this (smeared) frame, going in an order that
	 * ends with the view corresponding to the epoch listed for this frame
	 * in the obs file; this way we can use the calculated information for
	 * that view in the "write" action screen and disk output that follows*/
	for (v2=v0_index+1; v2<=v0_index+nviews; v2++) {
		v = v2 % nviews;

		/* Launch 9*nframes-threaded kernel to set pos->ae,pos->oe,pos->bistatic.*/
		THD.x = 9; BLK.x = nframes;
		hipLaunchKernelGGL(( cf_set_pos_ae_doppler_af_krnl), dim3(BLK),dim3(THD), 0, 0, pos,frame,pos_n,nframes,v);
		checkErrorAfterKernelLaunch("cf_set_pos_ae_doppler_af_krnl");
		deviceSyncAfterKernelLaunch("cf_set_pos_ae_doppler_af_krnl");

		/* Configure & launch posclr_krnl to initialize POS view */
		nx = 2*pos_n[0]+1;	// full POS width; assumes all frames share n
		nThreads = nframes * nx * nx;
		BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock);
		THD.x = maxThreadsPerBlock;
		hipLaunchKernelGGL(( cf_posclr_af_krnl), dim3(BLK),dim3(THD), 0, 0, pos,
				pos_n[0], nx, (nx*nx), nframes);
		checkErrorAfterKernelLaunch("cf_posclr_af_krnl");

		/* Call posvis_cuda_2 to get facet number, scattering angle,
		 * distance toward Earth at center of each POS pixel; set flag
		 * posbnd if any model portion extends beyond POS frame limits.*/
		/* NOTE: Limited to single component for now */
		if (posvis_af(dpar,dmod,ddat,orbit_offset,s,nframes,0,0,0) &&
				v == v0_index) {
			/* Call single-threaded kernel to set dpar->posbnd and
			 * dpar->posbnd_logfactor */
			THD.x = nframes;
			hipLaunchKernelGGL(( cf_set_posbnd_doppler_af_krnl), dim3(1),dim3(THD), 0, 0, dpar,frame,pos,nframes);
			checkErrorAfterKernelLaunch("cf_set_posbnd_doppler_af_krnl");
		}

		/* Launch nframes-threaded kernel to get dpar->exclude_seen */
		THD.x = nframes;
		hipLaunchKernelGGL(( cf_get_exclude_seen_af_krnl), dim3(1),dim3(THD), 0, 0, dpar,pos,xylim,nframes);
		checkErrorAfterKernelLaunch("cf_get_exclude_seen_af_krnl");
		gpuErrchk(hipMemcpyFromSymbol(&exclude_seen, cfaf_exclude_seen, sizeof(int),
				0, hipMemcpyDeviceToHost));

		/* Get the largest pos->xlim and ylim values for all frames */
		hipLaunchKernelGGL(( cf_get_global_frmsz_krnl), dim3(1),dim3(THD), 0, 0, global_lim, xylim, nframes);
		checkErrorAfterKernelLaunch("cf_get_global_frmsz_krnl");
		deviceSyncAfterKernelLaunch("cf_get_global_frmsz_krnl");

		/* Go through all POS pixels visible with low enough scattering
		 * angle and mark the facets which project onto their centers as
		 * having been "seen" at least once                            */
		/* I'll launch a multi-threaded kernel here:
		 * (xlim1 - xlim0 + 1)^2 threads */
		if (s != exclude_seen && v == v0_index) {
			xspan = global_lim[1] - global_lim[0] + 1; // xlim1 - xlim0 + 1;
			yspan = global_lim[3] - global_lim[2] + 1; // ylim1 - ylim0 + 1;
			nThreads = nframes * xspan * yspan;
			/* Configure & launch posclr_af_krnl to initialize POS view */
			BLK.x = floor((maxThreadsPerBlock-1+nThreads)/maxThreadsPerBlock);
			THD.x = maxThreadsPerBlock;
			hipLaunchKernelGGL(( cf_mark_pixels_seen_af_krnl), dim3(BLK),dim3(THD), 0, 0, dpar,
					dmod, pos, global_lim, (xspan*yspan), xspan, nframes, c);
			checkErrorAfterKernelLaunch("cf_mark_pixels_seen_af_krnl");
		}

		/* Zero out fit Doppler spectrum, then call pos2doppler to create
		 * the fit image by mapping power from the plane of the sky to
		 * Doppler space.  */
		deviceSyncAfterKernelLaunch("pre-clrvect_krnl sync in calc_fits_cuda_af.cu");
		nThreads = frmsz * nframes;
		BLK.x = floor((maxThreadsPerBlock-1+nThreads)/maxThreadsPerBlock);
		THD.x = maxThreadsPerBlock;
		hipLaunchKernelGGL(( clrvect_af_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, s, nframes, nThreads, frmsz);
		checkErrorAfterKernelLaunch("clrvect_af_krnl");
		deviceSyncAfterKernelLaunch("clrvect_af_krnl");

		if (pos2doppler_cuda_af(dpar,dmod,ddat,0.0,0.0,0.0,0, s,nframes,v)) {
			/* nframes-threaded kernel to set badradar flag and calc. logfactor*/
			THD.x = nframes;
			hipLaunchKernelGGL(( cf_set_badradar_doppler_af_krnl), dim3(1),dim3(THD), 0, 0, dpar,ddat,frame,s,nframes);
			checkErrorAfterKernelLaunch("cf_set_badradar_doppler_af_krnl");
		}

		/* If smearing is being modeled, include the Doppler calculations from
		 * this view in the summed results for this frame  */
		if (nviews > 1) {
			/* Launch ndop-threaded kernel to add fit[i][j] to fit_store[i][j]*/
			BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock);
			THD.x = maxThreadsPerBlock;
			hipLaunchKernelGGL(( cf_add_fit_store_af_krnl1), dim3(BLK),dim3(THD), 0, 0, ddat,fit_store,frmsz,s,nframes);
			checkErrorAfterKernelLaunch("cf_add_fit_store_af_krnl1");

			THD.x = nframes;
			hipLaunchKernelGGL(( cf_add_fit_store_doppler_af_krnl2), dim3(1),dim3(THD), 0, 0, frame,overflow,nframes);
			checkErrorAfterKernelLaunch("cf_add_fit_store_doppler_af_krnl2");
		}
	}

	/* If smearing is being modeled, compute mean values over all views for
	 * this frame and store them in the standard frame structure */
	/* This kernel also carries out the gamma transformation on the fit
	 * image if the par->dd_gamma flag is not set  */
	if (nviews > 1) {
		/* Launch (nframes*ndop)-threaded kernel */
		nThreads = frmsz*nframes;
		BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock);
		THD.x = maxThreadsPerBlock;
		hipLaunchKernelGGL(( cf_finish_fit_store_af_krnl), dim3(BLK),dim3(THD), 0, 0, ddat,fit_store,s,nThreads,frmsz);
		checkErrorAfterKernelLaunch("cf_finish_fit_af_store");

		THD.x = nframes;
		hipLaunchKernelGGL(( cf_finish_fit_doppler_af_krnl2), dim3(1),dim3(THD), 0, 0, frame,overflow,nframes);
		checkErrorAfterKernelLaunch("cf_finish_fit_deldop_af_krnl2");
		/* NOTE(review): only the pointer array is freed; the per-frame
		 * fit_store[i] buffers allocated above are leaked. */
		hipFree(fit_store);
	}

	/* De-allocate memory */
	hipFree(frame);
	hipFree(view0);
	hipFree(pos);
	hipFree(overflow);
	hipFree(ndop);
	hipFree(pos_n);
	hipFree(global_lim);
	hipFree(xylim);
	//	}  /* end loop over frames */
}

/* Legacy single-threaded CPU implementation of the plane-of-sky dataset
 * calculation, kept (commented out) as the reference for a future
 * calc_poset_cuda port. */
//void calc_poset( struct par_t *par, struct mod_t *mod, struct poset_t *poset,
//		int s)
//{
//	const char *monthName[12] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
//			"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
//	double orbit_offset[3] = {0.0, 0.0, 0.0};
//
//	FILE *fpopt;
//	char tempstring[MAXLEN], name[MAXLEN];
//	int year, mon, day, hour, min, sec, f, c, i, j, k, l, nrow_fit, ncol_fit, n_pos,
//			facetnum, x, y, v, v2;
//	double w[3], spin_colat, spin_azim, xoff, yoff, resamp_fact, resamp_x0, resamp_y0,
//			xcom_fit, ycom_fit, resamp_xwidth, resamp_ywidth, resamp_angle, oa[3][3],
//			to_earth[3], to_earth_lat, to_earth_long, rotphase, sa[3][3], to_sun[3],
//			to_sun_lat, to_sun_long, pab[3], pab_lat, pab_long, intensityfactor,
//			phi, theta, psi, intspin_body[3], badposet_logfactor_view;
//	double **fit_store;
//	struct posetfrm_t *frame;
//	struct posetview_t *view0;
//	struct pos_t *pos;
//
//	for (f=0; f<poset->nframes; f++) {
//
//		frame = &poset->frame[f];
//		view0 = &frame->view[poset->v0];
//		pos = &frame->pos;
//
//		ncol_fit = frame->ncol;
//		nrow_fit = frame->nrow;
//
//		/* If smearing is being modeled, initialize variables that
//		   will be used to sum results calculated for individual views */
//
//		if (poset->nviews > 1) {
//			fit_store = matrix( 1, ncol_fit, 1, nrow_fit);
//			for (i=1; i<=ncol_fit; i++)
//				for (j=1; j<=nrow_fit; j++)
//
fit_store[i][j] = 0.0; // } // // /* Loop over all views for this (smeared) frame, going in an order that // ends with the view corresponding to the epoch listed for this frame // in the obs file; this way we can use the calculated information for // that view in the "write" action screen and disk output that follows */ // // for (v2=poset->v0+1; v2<=poset->v0+poset->nviews; v2++) { // v = v2 % poset->nviews; // // for (i=0; i<=2; i++) // for (j=0; j<=2; j++) { // pos->ae[i][j] = frame->view[v].ae[i][j]; // pos->oe[i][j] = frame->view[v].oe[i][j]; // pos->se[i][j] = frame->view[v].se[i][j]; // } // pos->bistatic = 1; // // /* Initialize the plane-of-sky view */ // // posclr( pos); // // /* Call routine posvis to get the facet number, scattering angle, // incidence angle, and distance toward Earth at the center of // each POS pixel; set the posbnd parameter to 1 if any portion // of the model extends beyond the POS frame limits. */ // // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // (int) par->pos_smooth, 0, 0, c) && v == poset->v0) { // par->posbnd = 1; // if (pos->bistatic) // par->posbnd_logfactor += 0.5 * frame->dof * pos->posbnd_logfactor; // else // par->posbnd_logfactor += frame->dof * pos->posbnd_logfactor; // } // // /* Now view the model from the source (sun) and get the facet number // and distance toward the source of each pixel in this projected view; // use this information to determine which POS pixels are shadowed */ // // if (pos->bistatic) { // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // 0, 1, 0, c)) { // par->posbnd = 1; // par->posbnd_logfactor += 0.5 * frame->dof * pos->posbnd_logfactor; // } // // /* Identify and mask out shadowed POS pixels */ // // posmask( pos, par->mask_tol); // } // // /* Go through all POS pixels which are visible and unshadowed with // sufficiently low scattering and incidence angles, and mark the facets // which 
project onto their centers as having been "seen" at least once */ // // if (s != par->exclude_seen && v == poset->v0) { // for (k=pos->xlim[0]; k<=pos->xlim[1]; k++) // for (l=pos->ylim[0]; l<=pos->ylim[1]; l++) { // if ((pos->cose[k][l] > par->mincosine_seen) // && (pos->cosi[k][l] > par->mincosine_seen) // && (pos->f[k][l] >= 0)) { // facetnum = pos->f[k][l]; // c = pos->comp[k][l]; // mod->shape.comp[c].real.f[facetnum].seen = 1; // } // } // } // // /* Compute the sky rendering */ // // intensityfactor = pow( pos->km_per_pixel/AU, 2.0); // apply_photo( mod, poset->ioptlaw, frame->view[v].solar_phase, // intensityfactor, pos, 0); // // /* Resample the sky rendering to get the model plane-of-sky image */ // /* (if using bicubic interpolation or cubic convolution, force */ // /* all model pixel values to be nonnegative) */ // /* */ // /* Implement the x and y COM offsets, xoff and yoff, by first */ // /* using them to compute xcom_fit and ycom_fit -- the COM position */ // /* in the fit image, relative to the center of the fit image -- and */ // /* then shifting the resampled region in the *opposite* direction */ // /* by the appropriate proportional amount. Then implement the */ // /* "northangle" setting (clockwise heading of north) by rotating */ // /* the resampling grid *counterclockwise* by northangle. 
*/ // // n_pos = pos->n; // xoff = frame->off[0].val; // yoff = frame->off[1].val; // xcom_fit = (frame->colcom_vig - (ncol_fit + 1)/2.0) + xoff; // ycom_fit = (frame->rowcom_vig - (nrow_fit + 1)/2.0) + yoff; // resamp_fact = frame->fit.km_per_pixel / pos->km_per_pixel; // resamp_x0 = -xcom_fit*resamp_fact; // resamp_y0 = -ycom_fit*resamp_fact; // resamp_xwidth = resamp_fact*(ncol_fit - 1); // resamp_ywidth = resamp_fact*(nrow_fit - 1); // resamp_angle = -frame->northangle; // resampim( frame->pos.b, -n_pos, n_pos, -n_pos, n_pos, // frame->fit.b, 1, ncol_fit, 1, nrow_fit, // resamp_x0, resamp_xwidth, resamp_y0, resamp_ywidth, resamp_angle, // (int) par->poset_resample, (int) par->image_rebin); // if (par->poset_resample == BICUBIC || par->poset_resample == CUBICCONV) { // for (k=1; k<=ncol_fit; k++) // for (l=1; l<=nrow_fit; l++) // frame->fit.b[k][l] = MAX( 0.0, frame->fit.b[k][l]); // } // // /* Set the badposet flag and increase badposet_logfactor if the model */ // /* plane-of-sky image is too small to "contain" all of the sky */ // /* rendering's nonzero pixels. 
*/ // // if (checkposet( pos->b, -n_pos, n_pos, -n_pos, n_pos, // resamp_x0, resamp_xwidth, resamp_y0, resamp_ywidth, resamp_angle, // &badposet_logfactor_view)) { // par->badposet = 1; // par->badposet_logfactor += frame->dof * badposet_logfactor_view // / poset->nviews; // } // // /* If smearing is being modeled, include the plane-of-sky // calculations from this view in the summed results for this frame */ // // if (poset->nviews > 1) // for (i=1; i<=ncol_fit; i++) // for (j=1; j<=nrow_fit; j++) // fit_store[i][j] += frame->fit.b[i][j]; // // } // // /* If smearing is being modeled, compute mean values over all views // for this frame and store them in the standard frame structure */ // // if (poset->nviews > 1) { // for (i=1; i<=ncol_fit; i++) // for (j=1; j<=nrow_fit; j++) // frame->fit.b[i][j] = fit_store[i][j] / poset->nviews; // free_matrix( fit_store, 1, ncol_fit, 1, nrow_fit); // } // // // } /* end loop over frames */ //} // // //void calc_lghtcrv( struct par_t *par, struct mod_t *mod, struct lghtcrv_t *lghtcrv, // int s) //{ // const char *monthName[12] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", // "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; // double orbit_offset[3] = {0.0, 0.0, 0.0}; // // FILE *fpopt; // char tempstring[MAXLEN], name[MAXLEN]; // int year, mon, day, hour, min, sec, n, ncalc, c, i, i_mid, j, k, l, facetnum, // n_cross360, n_projectedpixels, n_shadowedpixels, x, y, v; // double epoch_mid, epoch_diff_min, epoch_diff, w[3], spin_colat, spin_azim, oa[3][3], // rotphase, sa[3][3], to_sun[3], to_sun_lat, to_sun_long, pab[3], pab_lat, // pab_long, intensityfactor, phi, theta, psi, intspin_body[3], posbnd_logfactor, // projected_area, lambertdisk_intensity, interp; // double **to_earth, *to_earth_lat, *to_earth_long, *rotphase_unwrapped; // struct crvrend_t *rend; // struct pos_t *pos; // // /* Initialize variables to avoid compilation warning */ // // i_mid = 0; // epoch_mid = epoch_diff = epoch_diff_min = 0.0; // n_cross360 = 0; // to_earth 
= NULL; // to_earth_lat = to_earth_long = rotphase_unwrapped = NULL; // // /* Initialize variables dealing with bad models */ // // posbnd_logfactor = 0.0; // // /* Get n, the number of observed points for this lightcurve, // and ncalc, the number of epochs at which model lightcurve // brightnesses are to be computed */ // // n = lghtcrv->n; // ncalc = lghtcrv->ncalc; // // /* Calculate the model lightcurve values at each of the user-specified // epochs x[i], with i=1,2,...,ncalc; these may or may not be the same as the // epochs t[i] (i=1,2,...,n) at which actual lightcurve observations were made. */ // // for (i=1; i<=ncalc; i++) { // // rend = &lghtcrv->rend[i]; // pos = &rend->pos; // // for (j=0; j<=2; j++) // for (k=0; k<=2; k++) { // pos->ae[j][k] = rend->ae[j][k]; // pos->oe[j][k] = rend->oe[j][k]; // pos->se[j][k] = rend->se[j][k]; // } // pos->bistatic = 1; // // /* Initialize the plane-of-sky view */ // // posclr( pos); // // /* Call routine posvis to get the facet number, scattering angle, // incidence angle, and distance toward Earth at the center of // each POS pixel; set the posbnd parameter to 1 if any portion // of the model extends beyond the POS frame limits. 
*/ // // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // (int) par->pos_smooth, 0, 0, c)) { // par->posbnd = 1; // if (pos->bistatic) // posbnd_logfactor += 0.5 * pos->posbnd_logfactor; // else // posbnd_logfactor += pos->posbnd_logfactor; // } // // /* Now view the model from the source (sun) and get the facet number // and distance toward the source of each pixel in this projected view; // use this information to determine which POS pixels are shadowed */ // // if (pos->bistatic) { // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // 0, 1, 0, c)) { // par->posbnd = 1; // posbnd_logfactor += 0.5 * pos->posbnd_logfactor; // } // // /* Identify and mask out shadowed POS pixels */ // // posmask( pos, par->mask_tol); // } // // /* Go through all POS pixels which are visible and unshadowed with // sufficiently low scattering and incidence angles, and mark the facets // which project onto their centers as having been "seen" at least once */ // // if (s != par->exclude_seen) { // for (k=pos->xlim[0]; k<=pos->xlim[1]; k++) // for (l=pos->ylim[0]; l<=pos->ylim[1]; l++) { // if ((pos->cose[k][l] > par->mincosine_seen) // && (pos->cosi[k][l] > par->mincosine_seen) // && (pos->f[k][l] >= 0)) { // facetnum = pos->f[k][l]; // c = pos->comp[k][l]; // mod->shape.comp[c].real.f[facetnum].seen = 1; // } // } // } // // /* Compute the model brightness for this model lightcurve point */ // // intensityfactor = pow( pos->km_per_pixel/AU, 2.0); // lghtcrv->y[i] = apply_photo( mod, lghtcrv->ioptlaw, lghtcrv->solar_phase[i], // intensityfactor, pos, 0); // // /* Finished with this calculated lightcurve point */ // // } // // /* Now that we have calculated the model lightcurve brightnesses y at each // of the epochs x, we use cubic spline interpolation (Numerical Recipes // routines spline and splint) to get model lightcurve brightness fit[i] // at each OBSERVATION epoch t[i], with 
i=1,2,...,n. This will allow us // (in routine chi2) to compare model to data (fit[i] to obs[i]) to get // chi-square. Note that vector y2 contains the second derivatives of // the interpolating function at the calculation epochs x. // // Smearing is handled by interpolating the brightness at the time t of // each individual view and then taking the mean of all views that // correspond to a given observed lightcurve point. */ // // spline( lghtcrv->x, lghtcrv->y, ncalc, 2.0e30, 2.0e30, lghtcrv->y2); // for (i=1; i<=n; i++) { // lghtcrv->fit[i] = 0.0; // for (v=0; v<lghtcrv->nviews; v++) { // splint( lghtcrv->x, lghtcrv->y, lghtcrv->y2, ncalc, // lghtcrv->t[i][v], &interp); // lghtcrv->fit[i] += interp; // } // lghtcrv->fit[i] /= lghtcrv->nviews; // } // // /* Deal with flags for model that extends beyond the POS frame */ // // par->posbnd_logfactor += lghtcrv->dof * (posbnd_logfactor/ncalc); // //}
98e65975097a503d019fd901ebc7dd48dad386a7.cu
/***************************************************************************************** calc_fits.c As the name implies, this routine calculates the fits to each data frame for the current set of model parameters. For example, for each delay-Doppler frame it calls routine posvis to create the model plane-of-sky image and then routine pos2deldop to create the model delay-Doppler image from this POS image. calc_fits also performs some of the screen and file output required by the "write" action; in particular, it carries out tasks that require information associated with plane-of-sky renderings, since such information is quickly overwritten if the "pos_scope" parameter is set to "global" (i.e., if all frames and lightcurve points share the same memory for their "pos" structures). Modified 2015 June 10 by CM: Implement smearing for the "fit" and "write" actions Modified 2014 February 14 by CM: Add "ilaw" argument to the apply_photo routine Modified 2013 July 28 by CM: For the "write" action, output ppm POS images when the "write_highlight" parameter is turned on Modified 2013 July 7 by CM: For the "write" action for lightcurve points and plane-of-sky frames, display the body-fixed longitude and latitude of the phase-angle bisector Modified 2013 June 25 by CM: Allow POS images written for optical data to be annotated with principal-axis shafts and the angular momentum vector For POS images (sky renderings), display the name of the image file and the maximum pixel value in the plot_surface routine (called by the write_pos routine) rather than here Modified 2013 April 24 by CM: Implement the "listpos_deldop" "listpos_opt" and "listpos_path" parameters Adjust names of output images so they are in alphanumeric order if > 100 per dataset Modified 2012 April 2 by CM: Correct instantaneous maximum breadth calculation for Doppler scaling factor Modified 2011 August 14 by CM: Display sidereal spin vector at each epoch, even for a PA rotator, if any spin impulses are used 
Modified 2010 September 1 by CM: Initialize variables to avoid compilation warnings Modified 2010 July 29 by CM: Fix bug introduced in calc_lghtcrv: rotation phases weren't being displayed for the "write" action For the "write" action for lightcurve datasets, include shadowed regions in projected area (and geometric albedo calculation) and display percentage of projected area that's shadowed Modified 2010 June 15 by CM: Revise arguments to pos2deldop and pos2doppler routines Modified 2010 May 28 by CM: Fix bug introduced with preceding change: in calc_lghtcrv, only deallocate memory for the "write" action (since it wasn't allocated in the first place for other actions) Modified 2010 May 24 by CM: For the "write" action for lightcurves, output the projected area and (for absolute photometry) geometric albedo Modified 2010 April 12 by CM: For the "write" action, include overflow region when computing cross sections Modified 2009 July 29 by CM: For the "write" action, fix bug: output ppm images rather than pgm images if the "plot_angmom" parameter is turned on For the "write" action, pass an argument to the "write_pos" routine explicitly telling it whether or not to produce a colored image Modified 2009 April 3 by CM: Initialize the "posbnd_logfactor" parameter and later set it for models that extend beyond the POS frame Add "badposet" and "badposet_logfactor" parameters: initialize them here and then use the new "checkposet" routine to adjust them for plane-of-sky fit images that are too small to "contain" the target Add "badradar" and "badradar_logfactor" parameters: initialize them here and then use the "pos2deldop" and "pos2doppler" routines (which are now int rather than void) to adjust them for models that are too wide in delay-Doppler space for the routines to handle Add "warn_badradar" argument to pos2deldop and pos2doppler routines For the "write" action, display each plane-of-sky fit frame's linear dimensions, the linear dimensions of the rectangular subset 
that contains the target, and the linear COM offsets Modified 2008 December 12 by CM: For the "write" action for NPA rotators, list Euler angles (giving the body-fixed axes' orientations in ecliptic coordinates) and spin vector components (in body-fixed coordinates) for each observation epoch For the "write" action for NPA rotators, ensure that maximum breadth is nonnegative Modified 2007 August 10 by CM: Eliminated unused variables and cleaned up a printf format For POS model frames (sky renderings) associated with lightcurve points and with plane-of-sky data frames, don't display the maximum pixel value unless the "optposmax" parameter is nonzero Modified 2007 August 4 by CM: Add comp matrix for POS frames Add orbit_offset and body arguments to posvis routine and remove facet argument Add orbit_xoff, orbit_yoff, orbit_dopoff and body parameters to pos2deldop and pos2doppler routines Add body argument to apply_photo routine Modified 2007 January 17 by CM: For the "write" action, display instantaneous folded zero-crossing bandwidth for Doppler and delay-Doppler frames Modified 2007 January 11 by CM: In calc_lghtcrv for the "write" action, count lightcurve points from 0 rather than 1, as is already done for lightcurve POS images (and for Doppler, delay-Doppler, and plane-of-sky frames) Modified 2007 January 6 by CM: In calc_lghtcrv for the "write" action, save rotation phase for each calculated lightcurve point so they can be output by routine chi2, and use cubic spline interpolation to obtain rotation phase at each observation epoch. 
Also display range of rotation phases if only one calculated point per lightcurve is displayed in full Modified 2006 October 1 by CM: In calc_lghtcrv, model lightcurve points are now intensities (relative to the solar intensity) rather than magnitudes In calc_lghtcrv and calc_poset, apply_photo routine has been revised to account for the POS pixel area and the 1 AU Sun-target distance Modified 2006 September 1 by CM and MCN: When "exclude_seen" parameter is used, add check that facet number pos->f[i][j] is nonnegative For the "write" action, don't display cross sections and albedos for uncalibrated (delay-)Doppler frames Modified 2006 June 21 by CM: In calc_deldop, changed delres to del_per_pixel and dopres to dop_per_pixel In calc_doppler, changed dopres to dop_per_bin For POS renderings and plane-of-sky fit frames, changed res to km_per_pixel Modified 2006 June 18 by CM: Allow each delay-Doppler frame within a dataset to have different dimensions after vignetting Allow each Doppler frame within a dataset to have different dimensions after vignetting Allow plane-of-sky frames to be rectangular rather than square, and no longer require an odd number of pixels per side Eliminate range datasets Modified 2006 March 10 by CM: Add "speckle" argument to pos2deldop and pos2doppler routines Modified 2005 October 6 by CM: For lightcurve datasets, replace SUNMAG constant by "sun_appmag" parameter, so that absolute photometry with filters other than V band can be used Modified 2005 July 25 by CM: For "write" action, display the model radar cross section and albedo for each delay-Doppler and Doppler frame Modified 2005 July 22 by CM: Created five separate routines for writing POS frames as images so that they can be called separately if the "mark_unseen" parameter is turned on for the "write" action (since in this case we must first process all datasets to see which model facets were "seen" and only then can write the POS images) Modified 2005 July 14 by CM: Fix bug in 
computing LE-to-COM delay and distance, LE-to-TE delay and distance, and instantaneous bandwidth and breadth Modified 2005 July 13 by CM: For "write" action for lightcurve points and plane-of-sky frames, display the body-fixed longitude and latitude of the Sun-to-asteroid line Modified 2005 July 5 by CM: Remove the "dir" argument from pos2deldop and pos2doppler and add the "set" argument Modified 2005 July 3 by CM: For "write" action for lightcurve datasets, implement the "lcrv_writeall" parameter, which produces screen display for every model lightcurve point rather than just the one point which falls closest to the midpoint of the observations. Modified 2005 June 25 by CM: For "write" action for delay-Doppler frames, display the delay and distance between the leading edge and the center of mass and between the leading edge and the trailing edge; for delay-Doppler and Doppler frames, display the instantaneous zero-crossing bandwidth and maximum breadth. All of the above are obtained from the model's delay-Doppler limits as determined PRIOR to convolution with the delay and Doppler response functions. 
Modified 2005 June 22 by CM: Keep track of which model facets have been "seen" (i.e., are visible from Earth, are unshadowed, and have sufficiently low scattering and incidence angles) in at least one data frame or lightcurve point Modified 2005 April 23 by CM: For the "write" action, list whether or not epochs have been corrected for one-way light travel time Modified 2005 March 1 by CM: Adjust arguments to the revised "resampim" routine to permit rotation of resampled plane-of-sky frames Initialize the "posbnd" parameter (flag indicating that the model extends beyond the model POS frame) to 0 here rather than in bestfit.c so that it can used for actions other than "fit" Fix bug in calc_poset which was incorrectly flagging the model as being too small for the model POS frame Modified 2005 February 21 by CM: Use the new "poset_resample" parameter to allow interpolation methods other than bilinear for constructing plane-of-sky fit images for plane-of-sky data frames Add the new "image_rebin" argument to function resampim to handle plane-of-sky fit frames which have much coarser resolution than the model POS frames from which they are constructed (i.e., which are greatly undersampled) For "write" action, display maximum pixel value for model POS images for plane-of-sky frames and calculated lightcurve images (in case someone wants to use the "optposmax" parameter to truncate the image brightness) Modified 2005 February 6 by CM: For "write" action, display rotation phase For "write" action, fix bug in computing the angular body-fixed coordinates of the line of sight for lightcurve datasets Modified 2005 January 25 by CM: Take care of unused and uninitialized variables Modified 2005 January 24 by CM: Add "calc_poset" routine to handle POS datasets For "write" action, display the angular body-fixed coordinates of the line of sight For "write" action, display calendar dates in addition to Julian dates For "write" action, display the date for range datasets Modified 2004 
December 19 by CM: For "write" action, display the projected area for each Doppler and delay-Doppler frame Modified 2004 May 3 by CM: For "write" action, display the (delay-)Doppler corrections for each frame Modified 2004 April 9 by CM: For "write" action, display the solar azimuth angles (N->E in the POS) Modified 2004 March 27 by CM: Eliminate output of range (rng) plane-of-sky images for delay-Doppler frames For "write" action, display the epoch, solar phase angle and apparent spin vector direction at the midpoint of lightcurve datasets For "write" action, if "plot_spinvec" parameter is turned on, POS pgm images include an arrow indicating the target's intrinsic spin vector. For "write" action, if "plot_subradar" parameter is turned on, POS pgm images for (delay-)Doppler datasets include an X indicating the target's subradar point. For "write" action, if "plot_com" parameter is turned on, POS pgm images for (delay-)Doppler datasets include a cross indicating the target's projected COM. For "write" action, if "plot_pa" parameter vector has any component(s) turned on, POS ppm images for (delay-)Doppler datasets include colored cylindrical shaft(s) indicating the positive end of the corresponding principal axis/axes. 
Modified 2004 Feb 29 by CM: Add comments for lightcurves Remove "sdev" argument to routine gamma_trans Compute lightcurve magnitudes rather than negative magnitudes Eliminate the "curve_mm" lightcurve output file, since it nearly duplicates the "fit.mm" file (except that the cal factor isn't included) Move the lightcurve calculations to the new "calc_lghtcrv" routine Eliminate the unused dat argument to calc_deldop, calc_doppler, and calc_range Eliminate "type" argument to the "apply_photo" routine, and add the "phase" (solar phase angle) argument Label lightcurve POS images as 0 through (ncalc-1) rather than 1 through ncalc, similar to (delay-)Doppler pgm images Modified 2003 July 30 by CM: Add three parameters for rotating/flipping output pgm files for delay-Doppler images (fit, data, residuals) Modified 2003 May 16 by CM: Add listres parameter for producing output files containing residual matrices Modified 2003 May 13 by CM: Don't resample and recenter residual pgm images if dd_scaling = none Correct a bug in normalizing file output for Doppler fits Modified 2003 May 10 by CM: Add scalefitobs parameter so that user can choose whether to scale the data and fit pgm images separately (default), to the maximum value of the two taken together, to the maximum fit value, or to the maximum data value Modified 2003 May 7 by CM: Add sinc2width argument to pos2deldop and pos2doppler Modified 2003 April 29 by CM: Don't truncate residuals to integer values before making pgm images Add nsinc2 argument to pos2deldop and pos2doppler Modified 2003 April 28 by CM: Display two angles for the spin vector, not just one Modified 2003 April 24 by CM: Move "delcom" from delay-Doppler datasets to individual frames Modified 2003 April 23 by CM: Removed "deldopoffs" call from calc_deldop and "dopoffs" call from calc_deldop, since these calls are now included in realize_delcor *****************************************************************************************/ extern "C" { #include 
"head.h" } __host__ void calc_deldop_cuda_af(struct par_t *dpar, struct mod_t *dmod, struct dat_t *ddat, int s, int c); __host__ void calc_doppler_cuda_af(struct par_t *dpar, struct mod_t *dmod, struct dat_t *ddat, int s, int c); //__host__ void calc_poset_cuda( struct par_t *par, struct mod_t *mod, int s); //__host__ void calc_lghtcrv_cuda(struct par_t *par, struct mod_t *mod, struct // lghtcrv_t *lghtcrv, int s); __device__ int cfaf_nframes, cfaf_nviews, cfaf_v0_index, cfaf_exclude_seen; __device__ unsigned char cfaf_type; __global__ void cf_init_devpar_af_krnl(struct par_t *dpar, struct mod_t *dmod, struct dat_t *ddat, int c, int *nf_nsets) { /* Single-threaded kernel */ if (threadIdx.x == 0) { dpar->posbnd = 0; dpar->badposet = 0; dpar->badradar = 0; dpar->posbnd_logfactor = 0.0; dpar->badposet_logfactor = 0.0; dpar->badradar_logfactor = 0.0; nf_nsets[0] = dmod->shape.comp[c].real.nf; nf_nsets[1] = ddat->nsets; } } __global__ void cf_init_seen_flags_af_krnl(struct mod_t *dmod, int c, int *nf_nsets) { /* nf-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < nf_nsets[0]) dmod->shape.comp[c].real.f[f].seen = 0; } __global__ void cf_get_set_type_af_krnl(struct dat_t *ddat, int s) { /* Single-threaded kernel */ if (threadIdx.x == 0) cfaf_type = ddat->set[s].type; } //__global__ void cf_set_final_pars_af_krnl(struct par_t *dpar, struct // dat_t *ddat) { // /* Single-threaded kernel */ // if (threadIdx.x == 0) { // dpar->posbnd_logfactor /= ddat->dof; // dpar->badposet_logfactor /= ddat->dof_poset; // dpar->badradar_logfactor /= (ddat->dof_deldop + ddat->dof_doppler); // } //} __host__ void calc_fits_cuda_af(struct par_t *dpar, struct mod_t *dmod, struct dat_t *ddat) { int s, *nf_nsets, c=0; unsigned char type; dim3 BLK,THD; cudaCalloc((void**)&nf_nsets, sizeof(int), 2); /* Initialize flags that indicate the model extends beyond POS frame, that * plane-of-sky fit images are too small to "contain" the target, and that * model is too wide in 
(delay-)Doppler space to create (delay-)Doppler fit * frames. Note that this also gets mod->shape.nf and nsets */ cf_init_devpar_af_krnl<<<1,1>>>(dpar, dmod, ddat, c, nf_nsets); checkErrorAfterKernelLaunch("cf_init_devpar_af_krnl"); deviceSyncAfterKernelLaunch("cf_init_devpar_af_krn"); /* Initialize the flags that indicate whether or not each facet of each * model component is ever visible and unshadowed from Earth * Note: Single component only for now. */ //for (c=0; c<mod->shape.ncomp; c++) BLK.x = floor((maxThreadsPerBlock - 1 + nf_nsets[0])/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; cf_init_seen_flags_af_krnl<<<BLK,THD>>>(dmod, c, nf_nsets); checkErrorAfterKernelLaunch("cf_init_seen_flags_af_krnl"); deviceSyncAfterKernelLaunch("cf_init_seen_flags_af_krnl"); /* Calculate the fits for each dataset in turn - use multi-GPU later */ for (s=0; s<nf_nsets[1]; s++) { /* Get data type */ cf_get_set_type_af_krnl<<<1,1>>>(ddat, s); checkErrorAfterKernelLaunch("cf_init_seen_flags_krnl (calc_fits_cuda)"); gpuErrchk(cudaMemcpyFromSymbol(&type, cfaf_type, sizeof(unsigned char), 0, cudaMemcpyDeviceToHost)); switch (type) { case DELAY: calc_deldop_cuda_af(dpar, dmod, ddat, s, c); break; case DOPPLER: calc_doppler_cuda_af(dpar, dmod, ddat, s, c); break; case POS: printf("Write calc_poset_cuda!"); // calc_poset_cuda(dpar, dmod, s); break; case LGHTCRV: printf("Write calc_lghtcrv_cuda!"); // calc_lghtcrv_cuda(dpar, dmod, s); break; default: printf("calc_fits_cuda.c: can't handle this type yet\n"); } } /* Complete calculations of values that will be used during a fit to * increase the objective function for models with bad properties */ cf_set_final_pars_krnl<<<1,1>>>(dpar, ddat); checkErrorAfterKernelLaunch("cf_set_final_pars_af_krnl"); } __global__ void cf_get_frames_af_krnl(struct dat_t *ddat, int s) { /* Single-threaded kernel */ if (threadIdx.x == 0) { switch(ddat->set[s].type) { case DELAY: cfaf_nframes = ddat->set[s].desc.deldop.nframes; break; case DOPPLER: 
			cfaf_nframes = ddat->set[s].desc.doppler.nframes;
			break;
		case POS:
			cfaf_nframes = ddat->set[s].desc.poset.nframes;
			break;
		case LGHTCRV:
			cfaf_nframes = ddat->set[s].desc.lghtcrv.ncalc;
			break;
		}
	}
}

/* nframes-threaded kernel (one thread per frame, single block): cache
 * per-frame shortcuts for delay-Doppler set s - frame pointer, ndel/ndop
 * dimensions, the v0 view, and the frame's pos struct - and have thread 0
 * record nviews/v0 and zero the shared overflow accumulators. */
__global__ void cf_set_shortcuts_deldop_af_krnl(
		struct dat_t *ddat,
		struct deldopfrm_t **frame,
		struct deldopview_t **view0,
		struct pos_t **pos,
		float *overflow,
		int *ndel,
		int *ndop,
		int s,
		int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm < nframes) {
		if (threadIdx.x==0) {
			cfaf_nviews = ddat->set[s].desc.deldop.nviews;
			cfaf_v0_index = ddat->set[s].desc.deldop.v0;
			overflow[0] = 0.0;	// cf_overflow_o2_store = 0.0;
			overflow[1] = 0.0;	// cf_overflow_m2_store = 0.0;
			overflow[2] = 0.0;	// cf_overflow_xsec_store = 0.0;
			overflow[3] = 0.0;	// cf_overflow_dopmean_store = 0.0;
			overflow[4] = 0.0;	// cf_overflow_delmean_store = 0.0;
		}
		frame[frm] = &ddat->set[s].desc.deldop.frame[frm];
		ndop[frm] = frame[frm]->ndop;
		ndel[frm] = frame[frm]->ndel;
		view0[frm] = &frame[frm]->view[ddat->set[s].desc.deldop.v0];
		pos[frm] = &frame[frm]->pos;
	}
}

/* nframes-threaded kernel: Doppler twin of the kernel above (no ndel and
 * only four overflow accumulators, since Doppler frames have no delay axis).
 * Also captures each frame's POS bounding box into xylim (unused here in the
 * visible code - presumably filled by a later launch; confirm). */
__global__ void cf_set_shortcuts_doppler_af_krnl(struct dat_t *ddat, int s,
		int nframes, struct dopfrm_t **frame, int *ndop,
		struct dopview_t **view0, struct pos_t **pos, float *overflow,
		int4 *xylim) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm < nframes) {
		if (threadIdx.x == 0) {
			cfaf_nviews = ddat->set[s].desc.doppler.nviews;
			cfaf_v0_index = ddat->set[s].desc.doppler.v0;
			overflow[0] = 0.0;	// cf_overflow_o2_store = 0.0;
			overflow[1] = 0.0;	// cf_overflow_m2_store = 0.0;
			overflow[2] = 0.0;	// cf_overflow_xsec_store = 0.0;
			overflow[3] = 0.0;	// cf_overflow_dopmean_store = 0.0;
		}
		frame[frm] = &ddat->set[s].desc.doppler.frame[frm];
		view0[frm] = &frame[frm]->view[ddat->set[s].desc.doppler.v0];
		ndop[frm] = frame[frm]->ndop;
		pos[frm] = &frame[frm]->pos;
	}
}

/* Copy view v's asteroid-to-Earth (ae) and observer-to-Earth (oe) rotation
 * matrices into each frame's pos struct; thread 0 of each frame also clears
 * the bistatic flag (radar: transmitter = receiver) and records pos->n.
 * Launch: one block per frame, >= 9 threads (one per matrix element). */
__global__ void cf_set_pos_ae_deldop_af_krnl(struct pos_t **pos,
		struct deldopfrm_t **frame, int *pos_n, int nframes, int v) {
	/* nframes*9-threaded kernel */
	int offset = threadIdx.x;
	int i = offset % 3;
	int j = offset / 3;
	int frm = blockIdx.x;

	if ((offset < 9) && (frm < nframes)) {
		pos[frm]->ae[i][j] = frame[frm]->view[v].ae[i][j];
		pos[frm]->oe[i][j] = frame[frm]->view[v].oe[i][j];

		/* Single-thread task */
		if (offset == 0) {
			pos[frm]->bistatic = 0;
			pos_n[frm] = pos[frm]->n;
		}
	}
}

/* Doppler twin of cf_set_pos_ae_deldop_af_krnl: same matrix copy and
 * per-frame bookkeeping, just for dopfrm_t frames. */
__global__ void cf_set_pos_ae_doppler_af_krnl(struct pos_t **pos,
		struct dopfrm_t **frame, int *pos_n, int nframes, int v) {
	/* nframes*9-threaded kernel */
	int offset = threadIdx.x;
	int i = offset % 3;
	int j = offset / 3;
	int frm = blockIdx.x;

	if ((offset < 9) && (frm < nframes)) {
		pos[frm]->ae[i][j] = frame[frm]->view[v].ae[i][j];
		pos[frm]->oe[i][j] = frame[frm]->view[v].oe[i][j];

		/* frm-level-thread task */
		if (offset == 0) {
			pos[frm]->bistatic = 0;
			pos_n[frm] = pos[frm]->n;
		}
	}
}

/* Reset every POS pixel of every frame to "empty sky" dummy values.
 * NOTE(review): the xlim/ylim resets below are performed redundantly by
 * every pixel thread of a frame (all write the same values), and i/j can be
 * negative only because pos->f et al. are offset-indexed matrices - both
 * presumably intentional shod conventions; confirm. */
__global__ void cf_posclr_af_krnl(struct pos_t **pos, int n, int nx,
		int frame_size, int nframes)
{
	/* (nframes * npixels)-threaded kernel where npixels is the number of pixels
	 * in the full POS image, so (2*pos->n + 1)^2 */
	int total_offset = blockIdx.x * blockDim.x + threadIdx.x;
	int frm = total_offset / frame_size;
	int offset = total_offset % frame_size; // local offset within one frame
	int i = (offset % nx) - n;
	int j = (offset / nx) - n;

	if ((offset < frame_size) && (total_offset < nframes*frame_size) &&
			(frm < nframes)) {
		/* For each POS pixel, zero out the optical brightness (b) and
		 * cos(scattering angle), reset the z coordinate (distance from COM towards
		 * Earth) to a dummy value, and reset the body, component, and facet onto
		 * which the pixel center projects to dummy values */
		pos[frm]->body[i][j] = pos[frm]->comp[i][j] = pos[frm]->f[i][j] = -1;
		pos[frm]->b_s[offset] = pos[frm]->cose_s[offset] = 0.0;
		pos[frm]->z_s[offset] = -HUGENUMBER;

		/* In the x direction, reset the model's leftmost and rightmost
		 * pixel number to dummy values, and similarly for the y direction */
		pos[frm]->xlim[0] = pos[frm]->ylim[0] = n;
		pos[frm]->xlim[1] = pos[frm]->ylim[1] =
-n;

		/* For a bistatic situation (lightcurve or plane-of-sky dataset), zero out
		 * cos(incidence angle) and reset the distance towards the sun, the body,
		 * component, and facet numbers as viewed from the sun, and the model's
		 * maximum projected extent as viewed from the sun to dummy values */
		if (pos[frm]->bistatic) {
			pos[frm]->bodyill[i][j] = pos[frm]->compill[i][j] =
					pos[frm]->fill[i][j] = -1;
			pos[frm]->cosill_s[offset] = 0.0;
			pos[frm]->zill_s[offset] = 0.0;
			pos[frm]->xlim2[0] = pos[frm]->ylim2[0] = n;
			pos[frm]->xlim2[1] = pos[frm]->ylim2[1] = -n;
		}
	}
}

/* nframes-threaded kernel: flag that the model extends beyond the POS frame
 * and accumulate each frame's dof-weighted log factor into the parameter
 * structure.
 * NOTE(review): the += below is a read-modify-write on one shared location
 * performed by all nframes threads with no atomic - a data race whenever
 * nframes > 1.  Left unchanged because the field's type (and hence the legal
 * atomicAdd overload / required SM level) is not visible here; confirm and
 * convert to an atomic or a proper reduction. */
__global__ void cf_set_posbnd_deldop_af_krnl(struct par_t *dpar,
		struct deldopfrm_t **frame, struct pos_t **pos, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm<nframes) {
		if (frm==0)
			dpar->posbnd = 1;
		dpar->posbnd_logfactor += frame[frm]->dof * pos[frm]->posbnd_logfactor;
	}
}

/* Doppler twin of cf_set_posbnd_deldop_af_krnl (same non-atomic += caveat). */
__global__ void cf_set_posbnd_doppler_af_krnl(struct par_t *dpar,
		struct dopfrm_t **frame, struct pos_t **pos, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm<nframes) {
		if (frm==0)
			dpar->posbnd = 1;
		dpar->posbnd_logfactor += frame[frm]->dof * pos[frm]->posbnd_logfactor;
	}
}

/* nframes-threaded kernel: snapshot each frame's POS bounding box
 * (xlim/ylim) into xylim[frm] and let thread 0 publish the exclude_seen
 * parameter in the device global cfaf_exclude_seen. */
__global__ void cf_get_exclude_seen_af_krnl(struct par_t *dpar,
		struct pos_t **pos, int4 *xylim, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	if (frm < nframes) {
		if (threadIdx.x == 0)
			cfaf_exclude_seen = dpar->exclude_seen;
		xylim[frm].w = pos[frm]->xlim[0];
		xylim[frm].x = pos[frm]->xlim[1];
		xylim[frm].y = pos[frm]->ylim[0];
		xylim[frm].z = pos[frm]->ylim[1];
	}
}

/* nframes-threaded kernel (single block): reduce the per-frame bounding
 * boxes in xylim[] to one box covering all frames, left in global_lim[] as
 * {min x, max x, min y, max y}.
 * FIX: previously every thread re-initialized global_lim[] to zero inside
 * the frm < nframes branch with no barrier before the atomics, so one
 * thread's initialization could arrive after - and erase - another thread's
 * atomicMin/atomicMax contribution.  Initialization is now done by a single
 * thread, followed by a block-wide barrier (placed outside the divergent
 * branch, as required for __syncthreads). */
__global__ void cf_get_global_frmsz_krnl(int *global_lim, int4 *xylim,
		int nframes) {
	int frm = threadIdx.x;

	/* Initialize global_lim exactly once */
	if (frm == 0)
		for (int i=0; i<4; i++)
			global_lim[i] = 0;

	/* Barrier before reducing; assumes the single-block launch the original
	 * relied on (frm is derived from threadIdx.x alone) */
	__syncthreads();

	/* Now calculate minimum/maximum over all frames */
	if (frm < nframes) {
		atomicMin(&global_lim[0], xylim[frm].w);
		atomicMax(&global_lim[1], xylim[frm].x);
		atomicMin(&global_lim[2], xylim[frm].y);
		atomicMax(&global_lim[3], xylim[frm].z);
	}
}
__global__ void cf_mark_pixels_seen_af_krnl( struct par_t *dpar, struct mod_t *dmod, struct pos_t **pos, int *global_lim, int frame_size, int xspan, int nframes, int c) { /* nframes*npixels-threaded kernel */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; int k = (offset % xspan) + global_lim[0]; // cf_xlim0; int l = (offset / xspan) + global_lim[2]; // cf_ylim0; int facetnum; c = 0; if ((offset < frame_size) && (frm < nframes)) { if ((pos[frm]->cose_s[offset] > dpar->mincosine_seen) && (pos[frm]->f[k][l] >= 0)) { facetnum = pos[frm]->f[k][l]; //c = cf_pos->comp[k][l]; dmod->shape.comp[c].real.f[facetnum].seen = 1; } } } __global__ void cf_set_badradar_deldop_af_krnl( struct par_t *dpar, struct dat_t *ddat, struct deldopfrm_t **frame, int s, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { if (threadIdx.x == 0) dpar->badradar = 1; dpar->badradar_logfactor += frame[frm]->dof * frame[frm]->badradar_logfactor / ddat->set[s].desc.deldop.nviews; } } __global__ void cf_set_badradar_doppler_af_krnl( struct par_t *dpar, struct dat_t *ddat, struct dopfrm_t **frame, int s, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { if (threadIdx.x == 0) dpar->badradar = 1; dpar->badradar_logfactor +=frame[frm]->dof * frame[frm]->badradar_logfactor / ddat->set[s].desc.doppler.nviews; } } __global__ void cf_add_fit_store_af_krnl1( struct dat_t *ddat, float **fit_store, int frame_size, int s, int nframes) { /* (nframes*ndel*ndop)-threaded kernel */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; if ((offset < frame_size) && (frm < nframes)) { switch (cfaf_type) { case DELAY: fit_store[frm][offset] += ddat->set[s].desc.deldop.frame[frm].fit_s[offset]; break; case DOPPLER: fit_store[frm][offset] += 
ddat->set[s].desc.doppler.frame[frm].fit_s[offset]; break; } } } __global__ void cf_add_fit_store_deldop_af_krnl2( struct deldopfrm_t **frame, float *overflow, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] - overflow_o2_store * overflow[1] - overflow_m2_store * overflow[2] - overflow_xsec_store * overflow[3] - overflow_dopmean_store * overflow[4] - overflow_delmean_store */ atomicAdd(&overflow[0], (float)frame[frm]->overflow_o2); atomicAdd(&overflow[1], (float)frame[frm]->overflow_m2); atomicAdd(&overflow[2], (float)frame[frm]->overflow_xsec); atomicAdd(&overflow[3], (float)frame[frm]->overflow_delmean); atomicAdd(&overflow[4], (float)frame[frm]->overflow_dopmean); } } __global__ void cf_add_fit_store_doppler_af_krnl2( struct dopfrm_t **frame, float *overflow, int nframes) { /* nframes-threaded kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] - overflow_o2_store * overflow[1] - overflow_m2_store * overflow[2] - overflow_xsec_store * overflow[3] - overflow_dopmean_store */ atomicAdd(&overflow[0], (float)frame[frm]->overflow_o2); atomicAdd(&overflow[1], (float)frame[frm]->overflow_m2); atomicAdd(&overflow[2], (float)frame[frm]->overflow_xsec); atomicAdd(&overflow[3], (float)frame[frm]->overflow_dopmean); } } __global__ void cf_finish_fit_store_af_krnl( struct dat_t *ddat, float **fit_store, int s, int nThreads, int frame_size) { /* (nframes*ndel*ndop)-threaded kernel for Delay Doppler, * (nframes*ndop)-threaded kernel for Doppler */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; if (offset < nThreads) switch (cfaf_type) { case DELAY: ddat->set[s].desc.deldop.frame[frm].fit_s[offset] = fit_store[frm][offset]; break; case DOPPLER: ddat->set[s].desc.doppler.frame[frm].fit_s[offset] = fit_store[frm][offset]; break; } } __global__ void cf_finish_fit_deldop_af_krnl2( struct deldopfrm_t **frame, float 
*overflow, int nframes) { /* nframes-threaded Kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] = overflow_o2_store * overflow[1] = overflow_m2_store * overflow[2] = overflow_xsec_store * overflow[3] = overflow_dopmean_store * overflow[4] = overflow_delmean_store */ frame[frm]->overflow_o2 = overflow[0] / cfaf_nviews; frame[frm]->overflow_m2 = overflow[1] / cfaf_nviews; frame[frm]->overflow_xsec = overflow[2] / cfaf_nviews; frame[frm]->overflow_dopmean = overflow[3] / cfaf_nviews; frame[frm]->overflow_delmean = overflow[4] / cfaf_nviews; } } __global__ void cf_finish_fit_doppler_af_krnl2( struct dopfrm_t **frame, float *overflow, int nframes) { /* nframes-threaded Kernel */ int frm = threadIdx.x; if (frm < nframes) { /* overflow[0] = overflow_o2_store * overflow[1] = overflow_m2_store * overflow[2] = overflow_xsec_store * overflow[3] = overflow_dopmean_store */ frame[frm]->overflow_o2 = overflow[0] / cfaf_nviews; frame[frm]->overflow_m2 = overflow[1] / cfaf_nviews; frame[frm]->overflow_xsec = overflow[2] / cfaf_nviews; frame[frm]->overflow_dopmean = overflow[3] / cfaf_nviews; } } __global__ void cf_gamma_trans_deldop_af_krnl( struct par_t *dpar, struct dat_t *ddat, int s, int nframes, int frame_size) { /* Multi-threaded kernel */ int total_offset = blockIdx.x * blockDim.x + threadIdx.x; int frm = total_offset / frame_size; int offset = total_offset % frame_size; /* Each thread uses this value, so put it in shared memory */ __shared__ float dd_gamma; dd_gamma = (float)dpar->dd_gamma; if ((offset < frame_size) && (frm < nframes) && (dd_gamma != 0)) { /* Carry out a gamma transformation on the fit image if requested */ dev_gamma_trans_float(&ddat->set[s].desc.deldop.frame[frm].fit_s[offset], dd_gamma); } } __host__ void calc_deldop_cuda_af(struct par_t *dpar, struct mod_t *dmod, struct dat_t *ddat, int s, int c) { float orbit_offset[3] = {0.0, 0.0, 0.0}; int nframes, nThreads, nviews, v0_index, nx, exclude_seen, v, v2, xspan, yspan, frmsz; int 
*ndel, *ndop, *global_lim, *pos_n; int4 *xylim; float *overflow, **fit_store; struct deldopfrm_t **frame; struct deldopview_t **view0; struct pos_t **pos; dim3 BLK,THD; /* Get # of frames for this deldop */ cf_get_frames_af_krnl<<<1,1>>>(ddat, s); checkErrorAfterKernelLaunch("cf_get_nframes_af_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&nframes, cfaf_nframes, sizeof(int), 0, cudaMemcpyDeviceToHost)); /* Allocate memory */ cudaCalloc((void**)&frame, sizeof(struct deldopfrm_t*), nframes); cudaCalloc((void**)&view0, sizeof(struct deldopview_t*),nframes); cudaCalloc((void**)&pos, sizeof(struct pos_t*), nframes); cudaCalloc((void**)&overflow, sizeof(float), 5); cudaCalloc((void**)&ndel, sizeof(int), nframes); cudaCalloc((void**)&ndop, sizeof(int), nframes); cudaCalloc((void**)&pos_n, sizeof(int), nframes); cudaCalloc((void**)&global_lim, sizeof(int), nframes); cudaCalloc((void**)&xylim, sizeof(int4), nframes); // for (f=0; f<nframes; f++) { /* Set frame, view0, and pos */ THD.x = nframes; cf_set_shortcuts_deldop_af_krnl<<<1,THD>>>(ddat, frame, view0, pos, overflow, ndel, ndop, s, nframes); checkErrorAfterKernelLaunch("cf_set_shortcuts_deldop_af_krnl"); deviceSyncAfterKernelLaunch("cf_set_shortcuts_deldop_af_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&nviews, cfaf_nviews, sizeof(int), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&v0_index, cfaf_v0_index, sizeof(int), 0, cudaMemcpyDeviceToHost)); /* Calculate size of each frame's fit array. This assumes all frames have * the same number of doppler and delay bins. */ frmsz = ndel[0]*ndop[0]; /* If smearing is being modeled, initialize variables that * will be used to sum results calculated for individual views. 
*/ if (nviews > 1) { /* Allocate fit_store which is a double-pointer in the af version as * each frame needs its own fit_store array */ cudaCalloc((void**)&fit_store, sizeof(float*), nframes); for (int i=0; i<nframes; i++) cudaCalloc((void**)&fit_store[i], sizeof(float), frmsz); } /* Loop over all views for this (smeared) frame, going in an order that ends with the view corresponding to the epoch listed for this frame in the obs file; this way we can use the calculated information for that view in the "write" action screen and disk output that follows */ for (v2=v0_index+1; v2<=v0_index+nviews; v2++) { v = v2 % nviews; /* Launch 9*nframes-threaded kernel to set pos->ae,pos->oe,pos->bistatic.*/ THD.x = 9; BLK.x = nframes; cf_set_pos_ae_deldop_af_krnl<<<BLK,THD>>>(pos, frame, pos_n, nframes, v); checkErrorAfterKernelLaunch("cf_set_pos_ae_deldop_af_krnl"); deviceSyncAfterKernelLaunch("cf_set_pos_ae_deldop_af_krnl"); /* Configure & launch posclr_krnl to initialize POS view */ nx = 2*pos_n[0]+1; nThreads = nframes * nx * nx; BLK.x = floor((maxThreadsPerBlock - 1 + nThreads) / maxThreadsPerBlock); THD.x = maxThreadsPerBlock; // Thread block dimensions cf_posclr_af_krnl<<<BLK,THD>>>(pos, pos_n[0], nx, (nx*nx), nframes); checkErrorAfterKernelLaunch("cf_posclr_af_krnl"); /* Call posvis_cuda_2 to get facet number, scattering angle, * distance toward Earth at center of each POS pixel; set flag * posbnd if any model portion extends beyond POS frame limits.*/ /* NOTE: Limited to single component for now */ if (posvis_af(dpar,dmod,ddat,orbit_offset,s,nframes,0,0,0) && v == v0_index) { /* Call single-threaded kernel to set dpar->posbnd and * dpar->posbnd_logfactor */ THD.x = nframes; cf_set_posbnd_deldop_af_krnl<<<BLK,THD>>>(dpar,frame,pos,nframes); checkErrorAfterKernelLaunch("cf_set_posbnd_deldop_af_krnl"); } /* Launch nframes-threaded kernel to get dpar->exclude_seen */ THD.x = nframes; cf_get_exclude_seen_af_krnl<<<1,THD>>>(dpar,pos,xylim,nframes); 
checkErrorAfterKernelLaunch("cf_get_exclude_seen_af_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&exclude_seen, cfaf_exclude_seen, sizeof(int), 0, cudaMemcpyDeviceToHost)); /* Get the largest pos->xlim and ylim values for all frames */ cf_get_global_frmsz_krnl<<<1,THD>>>(global_lim, xylim, nframes); checkErrorAfterKernelLaunch("cf_get_global_frmsz_krnl"); deviceSyncAfterKernelLaunch("cf_get_global_frmsz_krnl"); /* Go through all POS pixels which are visible with low enough * scattering angle and mark the facets which project onto their * centers as having been "seen" at least once */ if (s != exclude_seen && v == v0_index) { xspan = global_lim[1] - global_lim[0] + 1; // xlim1 - xlim0 + 1; yspan = global_lim[3] - global_lim[2] + 1; // ylim1 - ylim0 + 1; nThreads = nframes * xspan * yspan; /* Configure & launch posclr_af_krnl to initialize POS view */ BLK.x = floor((maxThreadsPerBlock - 1 + nThreads) / maxThreadsPerBlock); THD.x = maxThreadsPerBlock; // Thread block dimensions cf_mark_pixels_seen_af_krnl<<<BLK,THD>>>(dpar, dmod, pos, global_lim, (xspan*yspan), xspan, nframes, c); checkErrorAfterKernelLaunch("cf_mark_pixels_seen_af_krnl"); } /* Zero out the fit delay-Doppler image, then call pos2deldop to * create the fit image by mapping power from the plane of the sky * to delay-Doppler space. 
*/ deviceSyncAfterKernelLaunch("pre-clrvect_krnl sync in calc_fits_cuda_af.cu"); nThreads = frmsz * nframes; BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; // Thread block dimensions clrvect_af_krnl<<<BLK,THD>>>(ddat, s, nframes, nThreads, frmsz); checkErrorAfterKernelLaunch("clrvect_af_krnl, calc_fits_cuda"); if (pos2deldop_cuda_af(dpar,dmod,ddat,0.0,0.0,0.0,0, s,nframes,v)) { /* Call single-threaded kernel to set badradar flag and * associated badradar_logfactor */ THD.x = nframes; cf_set_badradar_deldop_af_krnl<<<1,THD>>>(dpar, ddat, frame, s, nframes); checkErrorAfterKernelLaunch("cf_set_badradar_deldop_af_krnl"); } /* If smearing is being modeled, include delay-Doppler calculations * from this view in the summed results for this frame */ if (nviews > 1) { /* Launch ndel*ndop-threaded kernel to add fit[i][j] to * fit_store[i][j]*/ /* frmsz and nThreads are still accurate from the clrvect call */ BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; cf_add_fit_store_af_krnl1<<<BLK,THD>>>(ddat,fit_store,frmsz,s,nframes); checkErrorAfterKernelLaunch("cf_add_fit_store_af_krnl1"); THD.x = nframes; cf_add_fit_store_deldop_af_krnl2<<<1,THD>>>(frame,overflow,nframes); checkErrorAfterKernelLaunch("cf_add_fit_store_deldop_af_krnl2"); } } /* end views loop */ /* If smearing is being modeled, compute mean values over all views for * this frame and store them in the standard frame structure */ /* This kernel also carries out the gamma transformation on the fit * image if the par->dd_gamma flag is not set */ if (nviews > 1) { /* Launch (nframes*ndel*ndop)-threaded kernel */ nThreads = frmsz*nframes; BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; cf_finish_fit_store_af_krnl<<<BLK,THD>>>(ddat,fit_store,s,nThreads,frmsz); checkErrorAfterKernelLaunch("cf_finish_fit_af_store"); THD.x = nframes; 
cf_finish_fit_deldop_af_krnl2<<<1,THD>>>(frame,overflow,nframes); checkErrorAfterKernelLaunch("cf_finish_fit_deldop_af_krnl2"); THD.x = maxThreadsPerBlock; cf_gamma_trans_deldop_af_krnl<<<BLK,THD>>>(dpar, ddat, s, nframes, frmsz); checkErrorAfterKernelLaunch("cf_gamma_trans_krnl"); cudaFree(fit_store); } //} /* end loop over frames */ /* De-allocate memory */ cudaFree(frame); cudaFree(view0); cudaFree(pos); cudaFree(overflow); cudaFree(ndel); cudaFree(ndop); cudaFree(pos_n); cudaFree(global_lim); cudaFree(xylim); } __host__ void calc_doppler_cuda_af(struct par_t *dpar, struct mod_t *dmod, struct dat_t *ddat, int s, int c) { float orbit_offset[3] = {0.0, 0.0, 0.0}; float **fit_store, *overflow; int *ndop, *pos_n, *global_lim, v0_index, frmsz, nThreads, exclude_seen, nviews, nframes, nx, f, v, v2, xspan, yspan; dim3 BLK,THD; struct dopfrm_t **frame; struct dopview_t **view0; struct pos_t **pos; int4 *xylim; /* Get # of frames for this deldop */ cf_get_frames_af_krnl<<<1,1>>>(ddat, s); checkErrorAfterKernelLaunch("cf_get_nframes_krnl (calc_deldop_cuda)"); gpuErrchk(cudaMemcpyFromSymbol(&nframes, cfaf_nframes, sizeof(int), 0, cudaMemcpyDeviceToHost)); /* Allocate memory */ cudaCalloc((void**)&frame, sizeof(struct dopfrm_t*), nframes); cudaCalloc((void**)&view0, sizeof(struct dopview_t*),nframes); cudaCalloc((void**)&pos, sizeof(struct pos_t*), nframes); cudaCalloc((void**)&overflow, sizeof(float), 4); cudaCalloc((void**)&ndop, sizeof(int), nframes); cudaCalloc((void**)&pos_n, sizeof(int), nframes); cudaCalloc((void**)&global_lim, sizeof(int), nframes); cudaCalloc((void**)&xylim, sizeof(int4), nframes); // for (f=0; f<nframes; f++) { /* Set frame, view0, and pos */ THD.x = nframes; cf_set_shortcuts_doppler_af_krnl<<<1,THD>>>(ddat, s, nframes, frame, ndop, view0, pos, overflow, xylim); checkErrorAfterKernelLaunch("cf_set_shortcuts_doppler_af_krnl"); deviceSyncAfterKernelLaunch("cf_set_shortcuts_doppler_af_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&nviews, cfaf_nviews, 
sizeof(int), 0, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpyFromSymbol(&v0_index, cfaf_v0_index, sizeof(int), 0, cudaMemcpyDeviceToHost)); /* Calculate size of each frame's fit array. This assumes all frames have * the same number of doppler. */ frmsz = ndop[0]; /* If smearing is being modeled, initialize variables that * will be used to sum results calculated for individual views. */ if (nviews > 1) { /* Allocate fit_store which is a double-pointer in the af version as * each frame needs its own fit_store array */ cudaCalloc((void**)&fit_store, sizeof(float*), nframes); for (int i=0; i<nframes; i++) cudaCalloc((void**)&fit_store[i], sizeof(float), frmsz); } /* Loop over all views for this (smeared) frame, going in an order that * ends with the view corresponding to the epoch listed for this frame * in the obs file; this way we can use the calculated information for * that view in the "write" action screen and disk output that follows*/ for (v2=v0_index+1; v2<=v0_index+nviews; v2++) { v = v2 % nviews; /* Launch 9*nframes-threaded kernel to set pos->ae,pos->oe,pos->bistatic.*/ THD.x = 9; BLK.x = nframes; cf_set_pos_ae_doppler_af_krnl<<<BLK,THD>>>(pos,frame,pos_n,nframes,v); checkErrorAfterKernelLaunch("cf_set_pos_ae_doppler_af_krnl"); deviceSyncAfterKernelLaunch("cf_set_pos_ae_doppler_af_krnl"); /* Configure & launch posclr_krnl to initialize POS view */ nx = 2*pos_n[0]+1; nThreads = nframes * nx * nx; BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; cf_posclr_af_krnl<<<BLK,THD>>>(pos, pos_n[0], nx, (nx*nx), nframes); checkErrorAfterKernelLaunch("cf_posclr_af_krnl"); /* Call posvis_cuda_2 to get facet number, scattering angle, * distance toward Earth at center of each POS pixel; set flag * posbnd if any model portion extends beyond POS frame limits.*/ /* NOTE: Limited to single component for now */ if (posvis_af(dpar,dmod,ddat,orbit_offset,s,nframes,0,0,0) && v == v0_index) { /* Call single-threaded kernel to set 
dpar->posbnd and * dpar->posbnd_logfactor */ THD.x = nframes; cf_set_posbnd_doppler_af_krnl<<<1,THD>>>(dpar,frame,pos,nframes); checkErrorAfterKernelLaunch("cf_set_posbnd_doppler_af_krnl"); } /* Launch nframes-threaded kernel to get dpar->exclude_seen */ THD.x = nframes; cf_get_exclude_seen_af_krnl<<<1,THD>>>(dpar,pos,xylim,nframes); checkErrorAfterKernelLaunch("cf_get_exclude_seen_af_krnl"); gpuErrchk(cudaMemcpyFromSymbol(&exclude_seen, cfaf_exclude_seen, sizeof(int), 0, cudaMemcpyDeviceToHost)); /* Get the largest pos->xlim and ylim values for all frames */ cf_get_global_frmsz_krnl<<<1,THD>>>(global_lim, xylim, nframes); checkErrorAfterKernelLaunch("cf_get_global_frmsz_krnl"); deviceSyncAfterKernelLaunch("cf_get_global_frmsz_krnl"); /* Go through all POS pixels visible with low enough scattering * angle and mark the facets which project onto their centers as * having been "seen" at least once */ /* I'll launch a multi-threaded kernel here: * (xlim1 - xlim0 + 1)^2 threads */ if (s != exclude_seen && v == v0_index) { xspan = global_lim[1] - global_lim[0] + 1; // xlim1 - xlim0 + 1; yspan = global_lim[3] - global_lim[2] + 1; // ylim1 - ylim0 + 1; nThreads = nframes * xspan * yspan; /* Configure & launch posclr_af_krnl to initialize POS view */ BLK.x = floor((maxThreadsPerBlock-1+nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; cf_mark_pixels_seen_af_krnl<<<BLK,THD>>>(dpar, dmod, pos, global_lim, (xspan*yspan), xspan, nframes, c); checkErrorAfterKernelLaunch("cf_mark_pixels_seen_af_krnl"); } /* Zero out fit Doppler spectrum, then call pos2doppler to create * the fit image by mapping power from the plane of the sky to * Doppler space. 
*/ deviceSyncAfterKernelLaunch("pre-clrvect_krnl sync in calc_fits_cuda_af.cu"); nThreads = frmsz * nframes; BLK.x = floor((maxThreadsPerBlock-1+nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; clrvect_af_krnl<<<BLK,THD>>>(ddat, s, nframes, nThreads, frmsz); checkErrorAfterKernelLaunch("clrvect_af_krnl"); deviceSyncAfterKernelLaunch("clrvect_af_krnl"); if (pos2doppler_cuda_af(dpar,dmod,ddat,0.0,0.0,0.0,0, s,nframes,v)) { /* nframes-threaded kernel to set badradar flag and calc. logfactor*/ THD.x = nframes; cf_set_badradar_doppler_af_krnl<<<1,THD>>>(dpar,ddat,frame,s,nframes); checkErrorAfterKernelLaunch("cf_set_badradar_doppler_af_krnl"); } /* If smearing is being modeled, include the Doppler calculations from * this view in the summed results for this frame */ if (nviews > 1) { /* Launch ndop-threaded kernel to add fit[i][j] to fit_store[i][j]*/ BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; cf_add_fit_store_af_krnl1<<<BLK,THD>>>(ddat,fit_store,frmsz,s,nframes); checkErrorAfterKernelLaunch("cf_add_fit_store_af_krnl1"); THD.x = nframes; cf_add_fit_store_doppler_af_krnl2<<<1,THD>>>(frame,overflow,nframes); checkErrorAfterKernelLaunch("cf_add_fit_store_doppler_af_krnl2"); } } /* If smearing is being modeled, compute mean values over all views for * this frame and store them in the standard frame structure */ /* This kernel also carries out the gamma transformation on the fit * image if the par->dd_gamma flag is not set */ if (nviews > 1) { /* Launch (nframes*ndop)-threaded kernel */ nThreads = frmsz*nframes; BLK.x = floor((maxThreadsPerBlock-1 + nThreads)/maxThreadsPerBlock); THD.x = maxThreadsPerBlock; cf_finish_fit_store_af_krnl<<<BLK,THD>>>(ddat,fit_store,s,nThreads,frmsz); checkErrorAfterKernelLaunch("cf_finish_fit_af_store"); THD.x = nframes; cf_finish_fit_doppler_af_krnl2<<<1,THD>>>(frame,overflow,nframes); checkErrorAfterKernelLaunch("cf_finish_fit_deldop_af_krnl2"); cudaFree(fit_store); } /* 
De-allocate memory */ cudaFree(frame); cudaFree(view0); cudaFree(pos); cudaFree(overflow); cudaFree(ndop); cudaFree(pos_n); cudaFree(global_lim); cudaFree(xylim); // } /* end loop over frames */ } //void calc_poset( struct par_t *par, struct mod_t *mod, struct poset_t *poset, // int s) //{ // const char *monthName[12] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", // "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; // double orbit_offset[3] = {0.0, 0.0, 0.0}; // // FILE *fpopt; // char tempstring[MAXLEN], name[MAXLEN]; // int year, mon, day, hour, min, sec, f, c, i, j, k, l, nrow_fit, ncol_fit, n_pos, // facetnum, x, y, v, v2; // double w[3], spin_colat, spin_azim, xoff, yoff, resamp_fact, resamp_x0, resamp_y0, // xcom_fit, ycom_fit, resamp_xwidth, resamp_ywidth, resamp_angle, oa[3][3], // to_earth[3], to_earth_lat, to_earth_long, rotphase, sa[3][3], to_sun[3], // to_sun_lat, to_sun_long, pab[3], pab_lat, pab_long, intensityfactor, // phi, theta, psi, intspin_body[3], badposet_logfactor_view; // double **fit_store; // struct posetfrm_t *frame; // struct posetview_t *view0; // struct pos_t *pos; // // for (f=0; f<poset->nframes; f++) { // // frame = &poset->frame[f]; // view0 = &frame->view[poset->v0]; // pos = &frame->pos; // // ncol_fit = frame->ncol; // nrow_fit = frame->nrow; // // /* If smearing is being modeled, initialize variables that // will be used to sum results calculated for individual views */ // // if (poset->nviews > 1) { // fit_store = matrix( 1, ncol_fit, 1, nrow_fit); // for (i=1; i<=ncol_fit; i++) // for (j=1; j<=nrow_fit; j++) // fit_store[i][j] = 0.0; // } // // /* Loop over all views for this (smeared) frame, going in an order that // ends with the view corresponding to the epoch listed for this frame // in the obs file; this way we can use the calculated information for // that view in the "write" action screen and disk output that follows */ // // for (v2=poset->v0+1; v2<=poset->v0+poset->nviews; v2++) { // v = v2 % poset->nviews; // // for (i=0; 
i<=2; i++) // for (j=0; j<=2; j++) { // pos->ae[i][j] = frame->view[v].ae[i][j]; // pos->oe[i][j] = frame->view[v].oe[i][j]; // pos->se[i][j] = frame->view[v].se[i][j]; // } // pos->bistatic = 1; // // /* Initialize the plane-of-sky view */ // // posclr( pos); // // /* Call routine posvis to get the facet number, scattering angle, // incidence angle, and distance toward Earth at the center of // each POS pixel; set the posbnd parameter to 1 if any portion // of the model extends beyond the POS frame limits. */ // // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // (int) par->pos_smooth, 0, 0, c) && v == poset->v0) { // par->posbnd = 1; // if (pos->bistatic) // par->posbnd_logfactor += 0.5 * frame->dof * pos->posbnd_logfactor; // else // par->posbnd_logfactor += frame->dof * pos->posbnd_logfactor; // } // // /* Now view the model from the source (sun) and get the facet number // and distance toward the source of each pixel in this projected view; // use this information to determine which POS pixels are shadowed */ // // if (pos->bistatic) { // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // 0, 1, 0, c)) { // par->posbnd = 1; // par->posbnd_logfactor += 0.5 * frame->dof * pos->posbnd_logfactor; // } // // /* Identify and mask out shadowed POS pixels */ // // posmask( pos, par->mask_tol); // } // // /* Go through all POS pixels which are visible and unshadowed with // sufficiently low scattering and incidence angles, and mark the facets // which project onto their centers as having been "seen" at least once */ // // if (s != par->exclude_seen && v == poset->v0) { // for (k=pos->xlim[0]; k<=pos->xlim[1]; k++) // for (l=pos->ylim[0]; l<=pos->ylim[1]; l++) { // if ((pos->cose[k][l] > par->mincosine_seen) // && (pos->cosi[k][l] > par->mincosine_seen) // && (pos->f[k][l] >= 0)) { // facetnum = pos->f[k][l]; // c = pos->comp[k][l]; // mod->shape.comp[c].real.f[facetnum].seen 
= 1; // } // } // } // // /* Compute the sky rendering */ // // intensityfactor = pow( pos->km_per_pixel/AU, 2.0); // apply_photo( mod, poset->ioptlaw, frame->view[v].solar_phase, // intensityfactor, pos, 0); // // /* Resample the sky rendering to get the model plane-of-sky image */ // /* (if using bicubic interpolation or cubic convolution, force */ // /* all model pixel values to be nonnegative) */ // /* */ // /* Implement the x and y COM offsets, xoff and yoff, by first */ // /* using them to compute xcom_fit and ycom_fit -- the COM position */ // /* in the fit image, relative to the center of the fit image -- and */ // /* then shifting the resampled region in the *opposite* direction */ // /* by the appropriate proportional amount. Then implement the */ // /* "northangle" setting (clockwise heading of north) by rotating */ // /* the resampling grid *counterclockwise* by northangle. */ // // n_pos = pos->n; // xoff = frame->off[0].val; // yoff = frame->off[1].val; // xcom_fit = (frame->colcom_vig - (ncol_fit + 1)/2.0) + xoff; // ycom_fit = (frame->rowcom_vig - (nrow_fit + 1)/2.0) + yoff; // resamp_fact = frame->fit.km_per_pixel / pos->km_per_pixel; // resamp_x0 = -xcom_fit*resamp_fact; // resamp_y0 = -ycom_fit*resamp_fact; // resamp_xwidth = resamp_fact*(ncol_fit - 1); // resamp_ywidth = resamp_fact*(nrow_fit - 1); // resamp_angle = -frame->northangle; // resampim( frame->pos.b, -n_pos, n_pos, -n_pos, n_pos, // frame->fit.b, 1, ncol_fit, 1, nrow_fit, // resamp_x0, resamp_xwidth, resamp_y0, resamp_ywidth, resamp_angle, // (int) par->poset_resample, (int) par->image_rebin); // if (par->poset_resample == BICUBIC || par->poset_resample == CUBICCONV) { // for (k=1; k<=ncol_fit; k++) // for (l=1; l<=nrow_fit; l++) // frame->fit.b[k][l] = MAX( 0.0, frame->fit.b[k][l]); // } // // /* Set the badposet flag and increase badposet_logfactor if the model */ // /* plane-of-sky image is too small to "contain" all of the sky */ // /* rendering's nonzero pixels. 
*/ // // if (checkposet( pos->b, -n_pos, n_pos, -n_pos, n_pos, // resamp_x0, resamp_xwidth, resamp_y0, resamp_ywidth, resamp_angle, // &badposet_logfactor_view)) { // par->badposet = 1; // par->badposet_logfactor += frame->dof * badposet_logfactor_view // / poset->nviews; // } // // /* If smearing is being modeled, include the plane-of-sky // calculations from this view in the summed results for this frame */ // // if (poset->nviews > 1) // for (i=1; i<=ncol_fit; i++) // for (j=1; j<=nrow_fit; j++) // fit_store[i][j] += frame->fit.b[i][j]; // // } // // /* If smearing is being modeled, compute mean values over all views // for this frame and store them in the standard frame structure */ // // if (poset->nviews > 1) { // for (i=1; i<=ncol_fit; i++) // for (j=1; j<=nrow_fit; j++) // frame->fit.b[i][j] = fit_store[i][j] / poset->nviews; // free_matrix( fit_store, 1, ncol_fit, 1, nrow_fit); // } // // // } /* end loop over frames */ //} // // //void calc_lghtcrv( struct par_t *par, struct mod_t *mod, struct lghtcrv_t *lghtcrv, // int s) //{ // const char *monthName[12] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", // "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; // double orbit_offset[3] = {0.0, 0.0, 0.0}; // // FILE *fpopt; // char tempstring[MAXLEN], name[MAXLEN]; // int year, mon, day, hour, min, sec, n, ncalc, c, i, i_mid, j, k, l, facetnum, // n_cross360, n_projectedpixels, n_shadowedpixels, x, y, v; // double epoch_mid, epoch_diff_min, epoch_diff, w[3], spin_colat, spin_azim, oa[3][3], // rotphase, sa[3][3], to_sun[3], to_sun_lat, to_sun_long, pab[3], pab_lat, // pab_long, intensityfactor, phi, theta, psi, intspin_body[3], posbnd_logfactor, // projected_area, lambertdisk_intensity, interp; // double **to_earth, *to_earth_lat, *to_earth_long, *rotphase_unwrapped; // struct crvrend_t *rend; // struct pos_t *pos; // // /* Initialize variables to avoid compilation warning */ // // i_mid = 0; // epoch_mid = epoch_diff = epoch_diff_min = 0.0; // n_cross360 = 0; // to_earth 
= NULL; // to_earth_lat = to_earth_long = rotphase_unwrapped = NULL; // // /* Initialize variables dealing with bad models */ // // posbnd_logfactor = 0.0; // // /* Get n, the number of observed points for this lightcurve, // and ncalc, the number of epochs at which model lightcurve // brightnesses are to be computed */ // // n = lghtcrv->n; // ncalc = lghtcrv->ncalc; // // /* Calculate the model lightcurve values at each of the user-specified // epochs x[i], with i=1,2,...,ncalc; these may or may not be the same as the // epochs t[i] (i=1,2,...,n) at which actual lightcurve observations were made. */ // // for (i=1; i<=ncalc; i++) { // // rend = &lghtcrv->rend[i]; // pos = &rend->pos; // // for (j=0; j<=2; j++) // for (k=0; k<=2; k++) { // pos->ae[j][k] = rend->ae[j][k]; // pos->oe[j][k] = rend->oe[j][k]; // pos->se[j][k] = rend->se[j][k]; // } // pos->bistatic = 1; // // /* Initialize the plane-of-sky view */ // // posclr( pos); // // /* Call routine posvis to get the facet number, scattering angle, // incidence angle, and distance toward Earth at the center of // each POS pixel; set the posbnd parameter to 1 if any portion // of the model extends beyond the POS frame limits. 
*/ // // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // (int) par->pos_smooth, 0, 0, c)) { // par->posbnd = 1; // if (pos->bistatic) // posbnd_logfactor += 0.5 * pos->posbnd_logfactor; // else // posbnd_logfactor += pos->posbnd_logfactor; // } // // /* Now view the model from the source (sun) and get the facet number // and distance toward the source of each pixel in this projected view; // use this information to determine which POS pixels are shadowed */ // // if (pos->bistatic) { // for (c=0; c<mod->shape.ncomp; c++) // if (posvis( &mod->shape.comp[c].real, orbit_offset, pos, // 0, 1, 0, c)) { // par->posbnd = 1; // posbnd_logfactor += 0.5 * pos->posbnd_logfactor; // } // // /* Identify and mask out shadowed POS pixels */ // // posmask( pos, par->mask_tol); // } // // /* Go through all POS pixels which are visible and unshadowed with // sufficiently low scattering and incidence angles, and mark the facets // which project onto their centers as having been "seen" at least once */ // // if (s != par->exclude_seen) { // for (k=pos->xlim[0]; k<=pos->xlim[1]; k++) // for (l=pos->ylim[0]; l<=pos->ylim[1]; l++) { // if ((pos->cose[k][l] > par->mincosine_seen) // && (pos->cosi[k][l] > par->mincosine_seen) // && (pos->f[k][l] >= 0)) { // facetnum = pos->f[k][l]; // c = pos->comp[k][l]; // mod->shape.comp[c].real.f[facetnum].seen = 1; // } // } // } // // /* Compute the model brightness for this model lightcurve point */ // // intensityfactor = pow( pos->km_per_pixel/AU, 2.0); // lghtcrv->y[i] = apply_photo( mod, lghtcrv->ioptlaw, lghtcrv->solar_phase[i], // intensityfactor, pos, 0); // // /* Finished with this calculated lightcurve point */ // // } // // /* Now that we have calculated the model lightcurve brightnesses y at each // of the epochs x, we use cubic spline interpolation (Numerical Recipes // routines spline and splint) to get model lightcurve brightness fit[i] // at each OBSERVATION epoch t[i], with 
i=1,2,...,n. This will allow us // (in routine chi2) to compare model to data (fit[i] to obs[i]) to get // chi-square. Note that vector y2 contains the second derivatives of // the interpolating function at the calculation epochs x. // // Smearing is handled by interpolating the brightness at the time t of // each individual view and then taking the mean of all views that // correspond to a given observed lightcurve point. */ // // spline( lghtcrv->x, lghtcrv->y, ncalc, 2.0e30, 2.0e30, lghtcrv->y2); // for (i=1; i<=n; i++) { // lghtcrv->fit[i] = 0.0; // for (v=0; v<lghtcrv->nviews; v++) { // splint( lghtcrv->x, lghtcrv->y, lghtcrv->y2, ncalc, // lghtcrv->t[i][v], &interp); // lghtcrv->fit[i] += interp; // } // lghtcrv->fit[i] /= lghtcrv->nviews; // } // // /* Deal with flags for model that extends beyond the POS frame */ // // par->posbnd_logfactor += lghtcrv->dof * (posbnd_logfactor/ncalc); // //}
ee8f44d422300549320a225019b1aafaedc4a0e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto _pp_var_xi0 __attribute__((unused)) = params_.globals[0];\ auto _pp_var_xo0 __attribute__((unused)) = params_.globals[1];\ auto _pp_var_s0 __attribute__((unused)) = params_.globals[2];\ auto* _pp_var_s __attribute__((unused)) = params_.state_vars[0];\ auto& _pp_var_ion_x __attribute__((unused)) = params_.ion_states[0];\ auto* 
_pp_var_ion_x_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_x_indexi_ = _pp_var_ion_x_index[tid_]; arb_value_type xo = 0; arb_value_type xi = 0; _pp_var_s[tid_] = _pp_var_s0; xi = _pp_var_xi0; xo = _pp_var_xo0; _pp_var_ion_x.external_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xo, _pp_var_ion_x.external_concentration[ion_x_indexi_]); _pp_var_ion_x.internal_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xi, _pp_var_ion_x.internal_concentration[ion_x_indexi_]); } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void compute_currents(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_x_indexi_ = _pp_var_ion_x_index[tid_]; arb_value_type xo = 0; arb_value_type xi = 0; _pp_var_s[tid_] = _pp_var_s0; xi = _pp_var_xi0; xo = _pp_var_xo0; _pp_var_ion_x.external_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xo, _pp_var_ion_x.external_concentration[ion_x_indexi_]); _pp_var_ion_x.internal_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xi, _pp_var_ion_x.internal_concentration[ion_x_indexi_]); } } } // namespace void mechanism_write_Xi_Xo_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p); if (!p->multiplicity) return; hipLaunchKernelGGL(( multiply), dim3(dim3{grid_dim), 
dim3(1}), block_dim, 0, *p); } void mechanism_write_Xi_Xo_gpu_compute_currents_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_write_Xi_Xo_gpu_advance_state_(arb_mechanism_ppack* p) {} void mechanism_write_Xi_Xo_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_write_Xi_Xo_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_write_Xi_Xo_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace testing
ee8f44d422300549320a225019b1aafaedc4a0e7.cu
#include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto _pp_var_xi0 __attribute__((unused)) = params_.globals[0];\ auto _pp_var_xo0 __attribute__((unused)) = params_.globals[1];\ auto _pp_var_s0 __attribute__((unused)) = params_.globals[2];\ auto* _pp_var_s __attribute__((unused)) = params_.state_vars[0];\ auto& _pp_var_ion_x __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_x_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of 
IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_x_indexi_ = _pp_var_ion_x_index[tid_]; arb_value_type xo = 0; arb_value_type xi = 0; _pp_var_s[tid_] = _pp_var_s0; xi = _pp_var_xi0; xo = _pp_var_xo0; _pp_var_ion_x.external_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xo, _pp_var_ion_x.external_concentration[ion_x_indexi_]); _pp_var_ion_x.internal_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xi, _pp_var_ion_x.internal_concentration[ion_x_indexi_]); } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void compute_currents(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_x_indexi_ = _pp_var_ion_x_index[tid_]; arb_value_type xo = 0; arb_value_type xi = 0; _pp_var_s[tid_] = _pp_var_s0; xi = _pp_var_xi0; xo = _pp_var_xo0; _pp_var_ion_x.external_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xo, _pp_var_ion_x.external_concentration[ion_x_indexi_]); _pp_var_ion_x.internal_concentration[ion_x_indexi_] = fma(_pp_var_weight[tid_], xi, _pp_var_ion_x.internal_concentration[ion_x_indexi_]); } } } // namespace void mechanism_write_Xi_Xo_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); init<<<grid_dim, block_dim>>>(*p); if (!p->multiplicity) return; multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p); } void mechanism_write_Xi_Xo_gpu_compute_currents_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; 
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); compute_currents<<<grid_dim, block_dim>>>(*p); } void mechanism_write_Xi_Xo_gpu_advance_state_(arb_mechanism_ppack* p) {} void mechanism_write_Xi_Xo_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_write_Xi_Xo_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_write_Xi_Xo_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace testing
f9d2f46c5cd6db10157ab71015967355dd94c050.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cuda_multiplicarmatriz.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *M = NULL; hipMalloc(&M, XSIZE*YSIZE); float *N = NULL; hipMalloc(&N, XSIZE*YSIZE); float *R = NULL; hipMalloc(&R, XSIZE*YSIZE); int tamM = 1; int tamN = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cuda_multiplicarmatriz), dim3(gridBlock),dim3(threadBlock), 0, 0, M,N,R,tamM,tamN); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cuda_multiplicarmatriz), dim3(gridBlock),dim3(threadBlock), 0, 0, M,N,R,tamM,tamN); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cuda_multiplicarmatriz), dim3(gridBlock),dim3(threadBlock), 0, 0, M,N,R,tamM,tamN); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); 
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f9d2f46c5cd6db10157ab71015967355dd94c050.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cuda_multiplicarmatriz.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *M = NULL; cudaMalloc(&M, XSIZE*YSIZE); float *N = NULL; cudaMalloc(&N, XSIZE*YSIZE); float *R = NULL; cudaMalloc(&R, XSIZE*YSIZE); int tamM = 1; int tamN = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cuda_multiplicarmatriz<<<gridBlock,threadBlock>>>(M,N,R,tamM,tamN); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cuda_multiplicarmatriz<<<gridBlock,threadBlock>>>(M,N,R,tamM,tamN); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cuda_multiplicarmatriz<<<gridBlock,threadBlock>>>(M,N,R,tamM,tamN); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ae6e12f9aacb539a5ec5daad53327d9a94d3c458.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #ifdef GEM5_FUSION #include <stdint.h> extern "C" { void m5_work_begin(uint64_t workid, uint64_t threadid); void m5_work_end(uint64_t workid, uint64_t threadid); void m5_dump_stats(uint64_t ns_delay, uint64_t ns_period); } #endif #define BLOCK_SIZE 16 #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; // FILE *fp; // char str[STR_SIZE]; // if( (fp = fopen(file, "w" )) == 0 ) // printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) { for (j=0; j < grid_cols; j++) { printf("%g ", vect[i*grid_cols+j]); // fputs(str,fp); index++; } printf("\n"); } // fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened: %s\n", file ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file 
format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_rows*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? 
validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, 
Cap,Rx,Ry,Rz,step,time_elapsed); hipDeviceSynchronize(); } return dst; } int main(int argc, char** argv) { run(argc,argv); return EXIT_SUCCESS; } #define MAX_STRING_LENGTH 1024 char dtfile[] = "data/temp.dat"; char dpfile[] = "data/power.dat"; char dofile[] = "output_pyramid.dat"; void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp[2],*FilesavingPower,*MatrixOut; char* tfile = (char*)&dtfile; char* pfile = (char*)&dpfile; char* ofile = (char*)&dofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc >= 2) { grid_rows = atoi(argv[1]); grid_cols = atoi(argv[1]); } if (argc >= 3) pyramid_height = atoi(argv[2]); if (argc >= 4) total_iterations = atoi(argv[3]); if (argc >= 5) { tfile = argv[4]; } if (argc >= 6) { pfile = argv[5]; } if (argc >= 7) { ofile = argv[6]; } if (argc < 4 || argc > 7) { printf("Wrong Usage: grid_rows/cols pyramid_height total_iterations\n"); exit(0); } size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp[0] = (float *) malloc(size*sizeof(float)); FilesavingTemp[1] = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp[0] || !FilesavingTemp[1] || !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, 
borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp[0], grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); // float *MatrixTemp[2], *MatrixPower; // hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size); // hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size); // // hipMalloc((void**)&MatrixPower, sizeof(float)*size); #ifdef GEM5_FUSION m5_dump_stats(0, 0); m5_work_begin(0, 0); #endif // hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice); // hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice); int ret; //REPEAT KERNEL FOR LONG GPU EXECUTION for (int adp=0; adp<1000; adp++) { printf("Starting GPU execution %d", adp+1); ret = compute_tran_temp(FilesavingPower,FilesavingTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); } //hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost); #ifdef GEM5_FUSION m5_work_end(0, 0); #endif writeoutput(FilesavingTemp[ret],grid_rows, grid_cols, ofile); // hipFree(MatrixPower); // hipFree(MatrixTemp[0]); // hipFree(MatrixTemp[1]); // free(MatrixOut); printf("\nTEST PASSED\n"); }
ae6e12f9aacb539a5ec5daad53327d9a94d3c458.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #ifdef GEM5_FUSION #include <stdint.h> extern "C" { void m5_work_begin(uint64_t workid, uint64_t threadid); void m5_work_end(uint64_t workid, uint64_t threadid); void m5_dump_stats(uint64_t ns_delay, uint64_t ns_period); } #endif #define BLOCK_SIZE 16 #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; // FILE *fp; // char str[STR_SIZE]; // if( (fp = fopen(file, "w" )) == 0 ) // printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) { for (j=0; j < grid_cols; j++) { printf("%g ", vect[i*grid_cols+j]); // fputs(str,fp); index++; } printf("\n"); } // fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened: %s\n", file ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) 
((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_rows*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? 
validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); 
cudaThreadSynchronize(); } return dst; } int main(int argc, char** argv) { run(argc,argv); return EXIT_SUCCESS; } #define MAX_STRING_LENGTH 1024 char dtfile[] = "data/temp.dat"; char dpfile[] = "data/power.dat"; char dofile[] = "output_pyramid.dat"; void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp[2],*FilesavingPower,*MatrixOut; char* tfile = (char*)&dtfile; char* pfile = (char*)&dpfile; char* ofile = (char*)&dofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc >= 2) { grid_rows = atoi(argv[1]); grid_cols = atoi(argv[1]); } if (argc >= 3) pyramid_height = atoi(argv[2]); if (argc >= 4) total_iterations = atoi(argv[3]); if (argc >= 5) { tfile = argv[4]; } if (argc >= 6) { pfile = argv[5]; } if (argc >= 7) { ofile = argv[6]; } if (argc < 4 || argc > 7) { printf("Wrong Usage: grid_rows/cols pyramid_height total_iterations\n"); exit(0); } size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp[0] = (float *) malloc(size*sizeof(float)); FilesavingTemp[1] = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp[0] || !FilesavingTemp[1] || !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, 
blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp[0], grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); // float *MatrixTemp[2], *MatrixPower; // cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size); // cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size); // // cudaMalloc((void**)&MatrixPower, sizeof(float)*size); #ifdef GEM5_FUSION m5_dump_stats(0, 0); m5_work_begin(0, 0); #endif // cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice); // cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice); int ret; //REPEAT KERNEL FOR LONG GPU EXECUTION for (int adp=0; adp<1000; adp++) { printf("Starting GPU execution %d", adp+1); ret = compute_tran_temp(FilesavingPower,FilesavingTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); } //cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost); #ifdef GEM5_FUSION m5_work_end(0, 0); #endif writeoutput(FilesavingTemp[ret],grid_rows, grid_cols, ofile); // cudaFree(MatrixPower); // cudaFree(MatrixTemp[0]); // cudaFree(MatrixTemp[1]); // free(MatrixOut); printf("\nTEST PASSED\n"); }
a325009e5628aa629efa21b671eb6e0bdb297b0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2020 by Contributors * \file array/cuda/spmm.cu * \brief SPMM C APIs and definitions. */ #include <dgl/array.h> #include "./spmm.cuh" #include "./ge_spmm.cuh" #include "functor.cuh" #include "../../runtime/cuda/cuda_common.h" namespace dgl { using namespace cuda; namespace aten { namespace { /*! \brief Call cuBLAS geam API for transpose operation for float and double. */ template <typename DType> hipblasStatus_t Xgeam(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const DType* alpha, const DType* A, int lda, const DType* beta, const DType* B, int ldb, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return HIPBLAS_STATUS_EXECUTION_FAILED; } template <> hipblasStatus_t Xgeam<float>(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const float* alpha, const float* A, int lda, const float* beta, const float* B, int ldb, float* C, int ldc) { return hipblasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <> hipblasStatus_t Xgeam<double>(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const double* alpha, const double* A, int lda, const double* beta, const double* B, int ldb, double* C, int ldc) { return hipblasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } /* \brief IndexSelect operator kernel implementation. * \note duplicate of IndexSelectKernel defined in array_index_select.cu */ template <typename DType, typename IdType> __global__ void _IndexSelectKernel( const DType* __restrict__ in, const IdType* __restrict__ idx, DType* __restrict__ out, int n, int m) { int i = blockIdx.x; for (int j = threadIdx.x; j < m; j += blockDim.x) out[i * m + j] = in[idx[i] * m + j]; } /* \brief Transpose operator kernel implementation. 
* \note not efficient but it's not a bottleneck, used for float16 dtype. */ template <typename DType> __global__ void _TransposeKernel( const DType* __restrict__ in, DType* __restrict__ out, int n, int m) { int i = blockIdx.x; for (int j = threadIdx.x; j < m; j += blockDim.x) out[i * m + j] = in[j * n + i]; } /* * \brief Tranpose the input matrix. * \param row number of rows of input matrix. * \param col number of columns of input matrix. */ template <typename DType> void _Transpose(const DType* in, DType* out, int row, int col) { DType alpha = 1., beta = 0.; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); if (!thr_entry->cublas_handle) CUBLAS_CALL(hipblasCreate(&(thr_entry->cublas_handle))); CUBLAS_CALL(hipblasSetStream(thr_entry->cublas_handle, thr_entry->stream)); CUBLAS_CALL(Xgeam<DType>( thr_entry->cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, row, col, &alpha, in, col, &beta, nullptr, row, out, row)); } /* * \brief Tranpose the input matrix for data type half. * \note cuBLAS has no geam API for half data type, fallback to our kernel. */ template <> void _Transpose<half>(const half* in, half* out, int row, int col) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = FindNumThreads(row); int nb = col; CUDA_KERNEL_CALL(_TransposeKernel, nb, nt, 0, thr_entry->stream, in, out, col, row); } /* * \brief */ template <typename DType, typename IdType> __global__ void _IndexSelectKernel(const DType* array, const IdType* index, int64_t length, DType* out) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = array[index[tx]]; tx += stride_x; } } /* \brief IndexSelect operator. * \note duplicate of IndexSelect defined in array_op.h but it can * not be applied to float16 dtype. 
*/ template<typename DType, typename IdType> NDArray _IndexSelect(NDArray array, NDArray index) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const DType* array_data = static_cast<DType*>(array->data); const IdType* idx_data = static_cast<IdType*>(index->data); const int64_t arr_len = array->shape[0]; const int64_t len = index->shape[0]; NDArray ret = NDArray::Empty({len}, array->dtype, array->ctx); if (len == 0) return ret; DType* ret_data = static_cast<DType*>(ret->data); const int nt = FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_IndexSelectKernel, nb, nt, 0, thr_entry->stream, array_data, idx_data, len, ret_data); return ret; } } // namespace namespace cusparse { #if CUDART_VERSION < 11000 template <typename DType> hipsparseStatus_t Xcsrmm2(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const DType* alpha, const hipsparseMatDescr_t descrA, const DType* csrValA, const int* csrRowPtrA, const int* csrColIndA, const DType* B, int ldb, const DType* beta, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return HIPSPARSE_STATUS_EXECUTION_FAILED; } template <> hipsparseStatus_t Xcsrmm2<float>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const float* alpha, const hipsparseMatDescr_t descrA, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const float* B, int ldb, const float* beta, float* C, int ldc) { return hipsparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> hipsparseStatus_t Xcsrmm2<double>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const double* alpha, const hipsparseMatDescr_t descrA, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const double* B, int ldb, const double* beta, double* 
C, int ldc) { return hipsparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } #endif /*! Cusparse implementation of SpMM on Csr format. */ template <typename DType, typename IdType> void CusparseCsrmm2( const DLContext& ctx, const CSRMatrix& csr, const DType* B_data, const DType* A_data, DType* C_data, int x_length) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. const int m = csr.num_rows; const int n = x_length; const int k = csr.num_cols; const int nnz = csr.indices->shape[0]; const DType alpha = 1.0; const DType beta = 0.0; // device auto device = runtime::DeviceAPI::Get(ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream)); // all one data array DType* valptr = nullptr; if (!A_data) { valptr = static_cast<DType*>(device->AllocWorkspace(ctx, nnz * sizeof(DType))); _Fill(valptr, nnz, static_cast<DType>(1.)); } #if CUDART_VERSION >= 11000 hipsparseSpMatDescr_t matA; hipsparseDnMatDescr_t matB, matC; constexpr auto dtype = cuda_dtype<DType>::value; constexpr auto idtype = cusparse_idtype<IdType>::value; CUSPARSE_CALL(hipsparseCreateCsr(&matA, m, k, nnz, static_cast<IdType*>(csr.indptr->data), static_cast<IdType*>(csr.indices->data), const_cast<DType*>(valptr? 
valptr : A_data), idtype, idtype, HIPSPARSE_INDEX_BASE_ZERO, dtype)); CUSPARSE_CALL(hipsparseCreateDnMat(&matB, k, n, n, const_cast<DType*>(B_data), dtype, HIPSPARSE_ORDER_ROW)); CUSPARSE_CALL(hipsparseCreateDnMat(&matC, m, n, n, C_data, dtype, HIPSPARSE_ORDER_ROW)); auto transA = HIPSPARSE_OPERATION_NON_TRANSPOSE; auto transB = HIPSPARSE_OPERATION_NON_TRANSPOSE; size_t workspace_size; CUSPARSE_CALL(hipsparseSpMM_bufferSize( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, &workspace_size)); void* workspace = device->AllocWorkspace(ctx, workspace_size); CUSPARSE_CALL(hipsparseSpMM( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, workspace)); device->FreeWorkspace(ctx, workspace); CUSPARSE_CALL(hipsparseDestroySpMat(matA)); CUSPARSE_CALL(hipsparseDestroyDnMat(matB)); CUSPARSE_CALL(hipsparseDestroyDnMat(matC)); #else // allocate matrix for temporary transposed output DType* trans_out = static_cast<DType*>(device->AllocWorkspace(ctx, m * n * sizeof(DType))); hipsparseMatDescr_t descr; CUSPARSE_CALL(hipsparseCreateMatDescr(&descr)); CUSPARSE_CALL(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, (valptr)? valptr : A_data, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), B_data, n, &beta, trans_out, m)); CUSPARSE_CALL(hipsparseDestroyMatDescr(descr)); // transpose the output matrix _Transpose(trans_out, C_data, n, m); device->FreeWorkspace(ctx, trans_out); #endif if (valptr) device->FreeWorkspace(ctx, valptr); } /*! Cusparse implementation of SpMM on Csr format. 
*/ template <typename DType, typename IdType> void CusparseCsrmm2Hetero( const DLContext& ctx, const CSRMatrix& csr, const DType* B_data, const DType* A_data, DType* C_data, int64_t x_length, hipStream_t strm_id) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. int int_maxlimit = std::numeric_limits<int>::max(); CHECK_GE(int_maxlimit, (csr.num_rows)); CHECK_GE(int_maxlimit, csr.num_cols); CHECK_GE(int_maxlimit, csr.indices->shape[0]); const int m = csr.num_rows; const int n = x_length; const int k = csr.num_cols; const int nnz = csr.indices->shape[0]; const DType alpha = 1.0; const DType beta = 1.0; // device auto device = runtime::DeviceAPI::Get(ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, strm_id)); // all one data array DType* valptr = nullptr; if (!A_data) { valptr = static_cast<DType*>(device->AllocWorkspace(ctx, nnz * sizeof(DType))); _Fill(valptr, nnz, static_cast<DType>(1.)); } #if CUDART_VERSION >= 11000 hipsparseSpMatDescr_t matA; hipsparseDnMatDescr_t matB, matC; constexpr auto dtype = cuda_dtype<DType>::value; constexpr auto idtype = cusparse_idtype<IdType>::value; CUSPARSE_CALL(hipsparseCreateCsr(&matA, m, k, nnz, static_cast<IdType*>(csr.indptr->data), static_cast<IdType*>(csr.indices->data), const_cast<DType*>(valptr? 
valptr : A_data), idtype, idtype, HIPSPARSE_INDEX_BASE_ZERO, dtype)); CUSPARSE_CALL(hipsparseCreateDnMat(&matB, k, n, n, const_cast<DType*>(B_data), dtype, HIPSPARSE_ORDER_ROW)); CUSPARSE_CALL(hipsparseCreateDnMat(&matC, m, n, n, C_data, dtype, HIPSPARSE_ORDER_ROW)); auto transA = HIPSPARSE_OPERATION_NON_TRANSPOSE; auto transB = HIPSPARSE_OPERATION_NON_TRANSPOSE; size_t workspace_size; CUSPARSE_CALL(hipsparseSpMM_bufferSize( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, &workspace_size)); void* workspace = device->AllocWorkspace(ctx, workspace_size); CUSPARSE_CALL(hipsparseSpMM( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, workspace)); device->FreeWorkspace(ctx, workspace); CUSPARSE_CALL(hipsparseDestroySpMat(matA)); CUSPARSE_CALL(hipsparseDestroyDnMat(matB)); CUSPARSE_CALL(hipsparseDestroyDnMat(matC)); #else hipsparseMatDescr_t descr; CUSPARSE_CALL(hipsparseCreateMatDescr(&descr)); CUSPARSE_CALL(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO)); CHECK_EQ(sizeof(IdType), sizeof(int32_t)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, (valptr)? valptr : A_data, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), B_data, n, &beta, C_data, m)); CUSPARSE_CALL(hipsparseDestroyMatDescr(descr)); #endif if (valptr) device->FreeWorkspace(ctx, valptr); } } // namespace cusparse #define SWITCH_OP(op, Op, ...) 
\ do { \ if ((op) == "add") { \ typedef cuda::binary::Add<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "sub") { \ typedef cuda::binary::Sub<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "mul") { \ typedef cuda::binary::Mul<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "div") { \ typedef cuda::binary::Div<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_lhs") { \ typedef cuda::binary::CopyLhs<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_rhs") { \ typedef cuda::binary::CopyRhs<DType> Op; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \ } \ } while (0) /*! * \brief Determine whether cusparse SpMM function is applicable. */ template <int bits, typename IdType> inline bool cusparse_available(bool more_nnz_than_matrix_size) { #if CUDART_VERSION < 11000 if (std::is_same<IdType, int>::value) if (bits > 16) return true; return false; #else if (bits == 16) return false; // cusparse's SpMM on fp16 is slow, temporally disabled. // If the CSR matrix has more NNZ than matrix size, we should not use cuSPARSE 11.1. return !more_nnz_than_matrix_size; #endif } /*! * \brief CUDA implementation of g-SpMM on Csr format. * \note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. 
*/ template <int XPU, typename IdType, int bits> void SpMMCsr(const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { int64_t feat_len = bcast.out_len; bool is_scalar_efeat = efeat.NumElements() == csr.indices->shape[0]; bool use_efeat = op != "copy_lhs"; if (reduce == "sum") { bool more_nnz = (csr.indices->shape[0] > csr.num_rows * csr.num_cols); if (op == "copy_lhs" && cusparse_available<bits, IdType>(more_nnz)) { // cusparse int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; SWITCH_BITS(bits, DType, { cusparse::CusparseCsrmm2<DType, IdType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), nullptr, static_cast<DType*>(out->data), x_length); }); } else if (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(more_nnz)) { // cusparse int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; if (!IsNullArray(csr.data)) { SWITCH_BITS(bits, DType, { efeat = _IndexSelect<DType, IdType>(efeat, csr.data); }); } SWITCH_BITS(bits, DType, { cusparse::CusparseCsrmm2<DType, IdType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), static_cast<DType*>(efeat->data), static_cast<DType*>(out->data), x_length); }); } else { // general kernel SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >( bcast, csr, ufeat, efeat, out, NullArray(), NullArray()); }); }); } } else if (reduce == "max") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else if (reduce == "min") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else { LOG(FATAL) << "Not implemented"; } } /*! 
* \brief CUDA implementation of g-SpMM on Csr format. * \note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. */ template <int XPU, typename IdType, int bits> void SpMMCsrHetero(const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& vec_csr, const std::vector<NDArray>& vec_ufeat, const std::vector<NDArray>& vec_efeat, std::vector<NDArray> vec_out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, // ufeat node type id const std::vector<dgl_type_t>& out_ntids) { // output node type id bool is_scalar_efeat = vec_efeat.size() != 0; bool use_efeat = op != "copy_lhs"; // TODO(Israt): Resolve PR-https://github.com/dmlc/dgl/issues/2995 and use multistream auto device = runtime::DeviceAPI::Get(vec_csr[0].indptr->ctx); SWITCH_BITS(bits, DType, { std::vector<DType*> trans_out(vec_out.size(), NULL); bool use_legacy_cusparsemm = (CUDART_VERSION < 11000) && // legacy cuSPARSE does not care about NNZ, hence the argument "false". 
((op == "copy_lhs" && cusparse_available<bits, IdType>(false)) || (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(false))); // Create temporary output buffer to store non-transposed output if (use_legacy_cusparsemm) { for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) { const int m = vec_out[ntype]->shape[0]; const int n = vec_out[ntype]->shape[1]; if (m == 0) continue; DType *out = static_cast<DType*>(device->AllocWorkspace(vec_csr[0].indptr->ctx, m * n * sizeof(DType))); CUDA_CALL(hipMemset(out, 0, m * n * sizeof(DType))); trans_out[ntype] = out; } } // Check shape of ufeat for all relation type and compute feature size int64_t x_length = 1; for (dgl_type_t etype = 0; etype < (ufeat_ntids.size() - 1); ++etype) { NDArray ufeat = vec_ufeat[ufeat_ntids[etype]]; NDArray next_ufeat = vec_ufeat[ufeat_ntids[etype + 1]]; CHECK_EQ(ufeat->ndim, next_ufeat->ndim) << "Input features have different shapes"; for (int i = 1; i < ufeat->ndim; ++i) { if (ufeat->shape[i] != next_ufeat->shape[i]) { if (ufeat->shape[i] == 1 || next_ufeat->shape[i] == 1) LOG(FATAL) << "Homogenized message passing on heterogeneous graphs does not support " << "automatic broadcasting. Please manually broadcast it before calling " << "message passing functions."; else LOG(FATAL) << "Input features have different shapes."; return; } if (etype == 0) x_length *= ufeat->shape[i]; } } auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); for (dgl_type_t etype = 0; etype < ufeat_ntids.size(); ++etype) { const dgl_type_t src_id = ufeat_ntids[etype]; const dgl_type_t dst_id = out_ntids[etype]; CSRMatrix csr = vec_csr[etype]; if (reduce == "sum") { bool more_nnz = (csr.indices->shape[0] > csr.num_rows * csr.num_cols); /* Call SpMM for each relation type */ if (op == "copy_lhs" && cusparse_available<bits, IdType>(more_nnz)) { // cusparse /* If CUDA is less than 11.0, put the output in trans_out for later transposition */ DType *out = (CUDART_VERSION < 11000) ? 
trans_out[dst_id] : static_cast<DType*>(vec_out[dst_id]->data); cusparse::CusparseCsrmm2Hetero<DType, IdType>( csr.indptr->ctx, csr, static_cast<DType*>(vec_ufeat[src_id]->data), nullptr, out, x_length, thr_entry->stream); } else if (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(more_nnz)) { // cusparse NDArray efeat = vec_efeat[etype]; if (!IsNullArray(csr.data)) efeat = _IndexSelect<DType, IdType>(vec_efeat[etype], csr.data); cusparse::CusparseCsrmm2Hetero<DType, IdType>( csr.indptr->ctx, csr, static_cast<DType*>(vec_ufeat[src_id]->data), static_cast<DType*>(efeat->data), // TODO(Israt): Change vec_out to trans_out to support CUDA version < 11 static_cast<DType*>(vec_out[dst_id]->data), x_length, thr_entry->stream); } else { // general kernel NDArray ufeat = (vec_ufeat.size() == 0) ? NullArray() : vec_ufeat[src_id]; NDArray efeat = (vec_efeat.size() == 0) ? NullArray() : vec_efeat[etype]; SWITCH_OP(op, Op, { cuda::SpMMCsrHetero<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >( bcast, csr, ufeat, efeat, vec_out[dst_id], NullArray(), NullArray(), thr_entry->stream); }); } } else if (reduce == "max") { // SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { NDArray ufeat = (vec_ufeat.size() == 0) ? NullArray() : vec_ufeat[src_id]; NDArray efeat = (vec_efeat.size() == 0) ? NullArray() : vec_efeat[etype]; cuda::SpMMCsrHetero<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >( bcast, csr, ufeat, efeat, vec_out[dst_id], out_aux[0], out_aux[1], thr_entry->stream); }); // }); } else if (reduce == "min") { // SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { NDArray ufeat = (vec_ufeat.size() == 0) ? NullArray() : vec_ufeat[src_id]; NDArray efeat = (vec_efeat.size() == 0) ? 
NullArray() : vec_efeat[etype]; cuda::SpMMCsrHetero<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >( bcast, csr, ufeat, efeat, vec_out[dst_id], out_aux[0], out_aux[1], thr_entry->stream); // }); }); } else { LOG(FATAL) << "Not implemented"; } } if (use_legacy_cusparsemm) { // transpose output for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) { const int m = vec_out[ntype]->shape[0]; const int n = vec_out[ntype]->shape[1]; if (m == 0) continue; DType *C_data = static_cast<DType*>(vec_out[ntype]->data); _Transpose(trans_out[ntype], C_data, n, m); device->FreeWorkspace(vec_csr[0].indptr->ctx, trans_out[ntype]); } } }); } /*! * \brief CUDA implementation of g-SpMM on Coo format. */ template <int XPU, typename IdType, int bits> void SpMMCoo(const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { if (reduce == "sum") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Sum<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, NullArray(), NullArray()); }); }); } else if (reduce == "max") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Max<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else if (reduce == "min") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Min<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else { LOG(FATAL) << "Not implemented"; } } template void SpMMCsr<kDLGPU, int32_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray 
efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int32_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int32_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsrHetero<kDLGPU, int32_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int64_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int32_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const 
std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int64_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int32_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int64_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCoo<kDLGPU, int32_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, 
std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); } // namespace aten } // namespace dgl
a325009e5628aa629efa21b671eb6e0bdb297b0a.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cuda/spmm.cu * \brief SPMM C APIs and definitions. */ #include <dgl/array.h> #include "./spmm.cuh" #include "./ge_spmm.cuh" #include "./functor.cuh" #include "../../runtime/cuda/cuda_common.h" namespace dgl { using namespace cuda; namespace aten { namespace { /*! \brief Call cuBLAS geam API for transpose operation for float and double. */ template <typename DType> cublasStatus_t Xgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const DType* alpha, const DType* A, int lda, const DType* beta, const DType* B, int ldb, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return CUBLAS_STATUS_EXECUTION_FAILED; } template <> cublasStatus_t Xgeam<float>(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const float* alpha, const float* A, int lda, const float* beta, const float* B, int ldb, float* C, int ldc) { return cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <> cublasStatus_t Xgeam<double>(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const double* alpha, const double* A, int lda, const double* beta, const double* B, int ldb, double* C, int ldc) { return cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } /* \brief IndexSelect operator kernel implementation. * \note duplicate of IndexSelectKernel defined in array_index_select.cu */ template <typename DType, typename IdType> __global__ void _IndexSelectKernel( const DType* __restrict__ in, const IdType* __restrict__ idx, DType* __restrict__ out, int n, int m) { int i = blockIdx.x; for (int j = threadIdx.x; j < m; j += blockDim.x) out[i * m + j] = in[idx[i] * m + j]; } /* \brief Transpose operator kernel implementation. * \note not efficient but it's not a bottleneck, used for float16 dtype. 
*/ template <typename DType> __global__ void _TransposeKernel( const DType* __restrict__ in, DType* __restrict__ out, int n, int m) { int i = blockIdx.x; for (int j = threadIdx.x; j < m; j += blockDim.x) out[i * m + j] = in[j * n + i]; } /* * \brief Tranpose the input matrix. * \param row number of rows of input matrix. * \param col number of columns of input matrix. */ template <typename DType> void _Transpose(const DType* in, DType* out, int row, int col) { DType alpha = 1., beta = 0.; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); if (!thr_entry->cublas_handle) CUBLAS_CALL(cublasCreate(&(thr_entry->cublas_handle))); CUBLAS_CALL(cublasSetStream(thr_entry->cublas_handle, thr_entry->stream)); CUBLAS_CALL(Xgeam<DType>( thr_entry->cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, row, col, &alpha, in, col, &beta, nullptr, row, out, row)); } /* * \brief Tranpose the input matrix for data type half. * \note cuBLAS has no geam API for half data type, fallback to our kernel. */ template <> void _Transpose<half>(const half* in, half* out, int row, int col) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = FindNumThreads(row); int nb = col; CUDA_KERNEL_CALL(_TransposeKernel, nb, nt, 0, thr_entry->stream, in, out, col, row); } /* * \brief */ template <typename DType, typename IdType> __global__ void _IndexSelectKernel(const DType* array, const IdType* index, int64_t length, DType* out) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = array[index[tx]]; tx += stride_x; } } /* \brief IndexSelect operator. * \note duplicate of IndexSelect defined in array_op.h but it can * not be applied to float16 dtype. 
*/ template<typename DType, typename IdType> NDArray _IndexSelect(NDArray array, NDArray index) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const DType* array_data = static_cast<DType*>(array->data); const IdType* idx_data = static_cast<IdType*>(index->data); const int64_t arr_len = array->shape[0]; const int64_t len = index->shape[0]; NDArray ret = NDArray::Empty({len}, array->dtype, array->ctx); if (len == 0) return ret; DType* ret_data = static_cast<DType*>(ret->data); const int nt = FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_IndexSelectKernel, nb, nt, 0, thr_entry->stream, array_data, idx_data, len, ret_data); return ret; } } // namespace namespace cusparse { #if CUDART_VERSION < 11000 template <typename DType> cusparseStatus_t Xcsrmm2(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const DType* alpha, const cusparseMatDescr_t descrA, const DType* csrValA, const int* csrRowPtrA, const int* csrColIndA, const DType* B, int ldb, const DType* beta, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return CUSPARSE_STATUS_EXECUTION_FAILED; } template <> cusparseStatus_t Xcsrmm2<float>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const float* alpha, const cusparseMatDescr_t descrA, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const float* B, int ldb, const float* beta, float* C, int ldc) { return cusparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> cusparseStatus_t Xcsrmm2<double>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const double* alpha, const cusparseMatDescr_t descrA, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const double* B, int ldb, const double* beta, double* C, int ldc) { 
return cusparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } #endif /*! Cusparse implementation of SpMM on Csr format. */ template <typename DType, typename IdType> void CusparseCsrmm2( const DLContext& ctx, const CSRMatrix& csr, const DType* B_data, const DType* A_data, DType* C_data, int x_length) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. const int m = csr.num_rows; const int n = x_length; const int k = csr.num_cols; const int nnz = csr.indices->shape[0]; const DType alpha = 1.0; const DType beta = 0.0; // device auto device = runtime::DeviceAPI::Get(ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream)); // all one data array DType* valptr = nullptr; if (!A_data) { valptr = static_cast<DType*>(device->AllocWorkspace(ctx, nnz * sizeof(DType))); _Fill(valptr, nnz, static_cast<DType>(1.)); } #if CUDART_VERSION >= 11000 cusparseSpMatDescr_t matA; cusparseDnMatDescr_t matB, matC; constexpr auto dtype = cuda_dtype<DType>::value; constexpr auto idtype = cusparse_idtype<IdType>::value; CUSPARSE_CALL(cusparseCreateCsr(&matA, m, k, nnz, static_cast<IdType*>(csr.indptr->data), static_cast<IdType*>(csr.indices->data), const_cast<DType*>(valptr? 
valptr : A_data), idtype, idtype, CUSPARSE_INDEX_BASE_ZERO, dtype)); CUSPARSE_CALL(cusparseCreateDnMat(&matB, k, n, n, const_cast<DType*>(B_data), dtype, CUSPARSE_ORDER_ROW)); CUSPARSE_CALL(cusparseCreateDnMat(&matC, m, n, n, C_data, dtype, CUSPARSE_ORDER_ROW)); auto transA = CUSPARSE_OPERATION_NON_TRANSPOSE; auto transB = CUSPARSE_OPERATION_NON_TRANSPOSE; size_t workspace_size; CUSPARSE_CALL(cusparseSpMM_bufferSize( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, &workspace_size)); void* workspace = device->AllocWorkspace(ctx, workspace_size); CUSPARSE_CALL(cusparseSpMM( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, workspace)); device->FreeWorkspace(ctx, workspace); CUSPARSE_CALL(cusparseDestroySpMat(matA)); CUSPARSE_CALL(cusparseDestroyDnMat(matB)); CUSPARSE_CALL(cusparseDestroyDnMat(matC)); #else // allocate matrix for temporary transposed output DType* trans_out = static_cast<DType*>(device->AllocWorkspace(ctx, m * n * sizeof(DType))); cusparseMatDescr_t descr; CUSPARSE_CALL(cusparseCreateMatDescr(&descr)); CUSPARSE_CALL(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, (valptr)? valptr : A_data, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), B_data, n, &beta, trans_out, m)); CUSPARSE_CALL(cusparseDestroyMatDescr(descr)); // transpose the output matrix _Transpose(trans_out, C_data, n, m); device->FreeWorkspace(ctx, trans_out); #endif if (valptr) device->FreeWorkspace(ctx, valptr); } /*! Cusparse implementation of SpMM on Csr format. 
*/ template <typename DType, typename IdType> void CusparseCsrmm2Hetero( const DLContext& ctx, const CSRMatrix& csr, const DType* B_data, const DType* A_data, DType* C_data, int64_t x_length, cudaStream_t strm_id) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. int int_maxlimit = std::numeric_limits<int>::max(); CHECK_GE(int_maxlimit, (csr.num_rows)); CHECK_GE(int_maxlimit, csr.num_cols); CHECK_GE(int_maxlimit, csr.indices->shape[0]); const int m = csr.num_rows; const int n = x_length; const int k = csr.num_cols; const int nnz = csr.indices->shape[0]; const DType alpha = 1.0; const DType beta = 1.0; // device auto device = runtime::DeviceAPI::Get(ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, strm_id)); // all one data array DType* valptr = nullptr; if (!A_data) { valptr = static_cast<DType*>(device->AllocWorkspace(ctx, nnz * sizeof(DType))); _Fill(valptr, nnz, static_cast<DType>(1.)); } #if CUDART_VERSION >= 11000 cusparseSpMatDescr_t matA; cusparseDnMatDescr_t matB, matC; constexpr auto dtype = cuda_dtype<DType>::value; constexpr auto idtype = cusparse_idtype<IdType>::value; CUSPARSE_CALL(cusparseCreateCsr(&matA, m, k, nnz, static_cast<IdType*>(csr.indptr->data), static_cast<IdType*>(csr.indices->data), const_cast<DType*>(valptr? 
valptr : A_data), idtype, idtype, CUSPARSE_INDEX_BASE_ZERO, dtype)); CUSPARSE_CALL(cusparseCreateDnMat(&matB, k, n, n, const_cast<DType*>(B_data), dtype, CUSPARSE_ORDER_ROW)); CUSPARSE_CALL(cusparseCreateDnMat(&matC, m, n, n, C_data, dtype, CUSPARSE_ORDER_ROW)); auto transA = CUSPARSE_OPERATION_NON_TRANSPOSE; auto transB = CUSPARSE_OPERATION_NON_TRANSPOSE; size_t workspace_size; CUSPARSE_CALL(cusparseSpMM_bufferSize( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, &workspace_size)); void* workspace = device->AllocWorkspace(ctx, workspace_size); CUSPARSE_CALL(cusparseSpMM( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, dtype, CUSPARSE_SPMM_CSR_ALG2, workspace)); device->FreeWorkspace(ctx, workspace); CUSPARSE_CALL(cusparseDestroySpMat(matA)); CUSPARSE_CALL(cusparseDestroyDnMat(matB)); CUSPARSE_CALL(cusparseDestroyDnMat(matC)); #else cusparseMatDescr_t descr; CUSPARSE_CALL(cusparseCreateMatDescr(&descr)); CUSPARSE_CALL(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO)); CHECK_EQ(sizeof(IdType), sizeof(int32_t)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, (valptr)? valptr : A_data, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), B_data, n, &beta, C_data, m)); CUSPARSE_CALL(cusparseDestroyMatDescr(descr)); #endif if (valptr) device->FreeWorkspace(ctx, valptr); } } // namespace cusparse #define SWITCH_OP(op, Op, ...) 
\ do { \ if ((op) == "add") { \ typedef cuda::binary::Add<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "sub") { \ typedef cuda::binary::Sub<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "mul") { \ typedef cuda::binary::Mul<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "div") { \ typedef cuda::binary::Div<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_lhs") { \ typedef cuda::binary::CopyLhs<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_rhs") { \ typedef cuda::binary::CopyRhs<DType> Op; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \ } \ } while (0) /*! * \brief Determine whether cusparse SpMM function is applicable. */ template <int bits, typename IdType> inline bool cusparse_available(bool more_nnz_than_matrix_size) { #if CUDART_VERSION < 11000 if (std::is_same<IdType, int>::value) if (bits > 16) return true; return false; #else if (bits == 16) return false; // cusparse's SpMM on fp16 is slow, temporally disabled. // If the CSR matrix has more NNZ than matrix size, we should not use cuSPARSE 11.1. return !more_nnz_than_matrix_size; #endif } /*! * \brief CUDA implementation of g-SpMM on Csr format. * \note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. 
*/ template <int XPU, typename IdType, int bits> void SpMMCsr(const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { int64_t feat_len = bcast.out_len; bool is_scalar_efeat = efeat.NumElements() == csr.indices->shape[0]; bool use_efeat = op != "copy_lhs"; if (reduce == "sum") { bool more_nnz = (csr.indices->shape[0] > csr.num_rows * csr.num_cols); if (op == "copy_lhs" && cusparse_available<bits, IdType>(more_nnz)) { // cusparse int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; SWITCH_BITS(bits, DType, { cusparse::CusparseCsrmm2<DType, IdType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), nullptr, static_cast<DType*>(out->data), x_length); }); } else if (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(more_nnz)) { // cusparse int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; if (!IsNullArray(csr.data)) { SWITCH_BITS(bits, DType, { efeat = _IndexSelect<DType, IdType>(efeat, csr.data); }); } SWITCH_BITS(bits, DType, { cusparse::CusparseCsrmm2<DType, IdType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), static_cast<DType*>(efeat->data), static_cast<DType*>(out->data), x_length); }); } else { // general kernel SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >( bcast, csr, ufeat, efeat, out, NullArray(), NullArray()); }); }); } } else if (reduce == "max") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else if (reduce == "min") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else { LOG(FATAL) << "Not implemented"; } } /*! 
* \brief CUDA implementation of g-SpMM on Csr format. * \note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. */ template <int XPU, typename IdType, int bits> void SpMMCsrHetero(const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& vec_csr, const std::vector<NDArray>& vec_ufeat, const std::vector<NDArray>& vec_efeat, std::vector<NDArray> vec_out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, // ufeat node type id const std::vector<dgl_type_t>& out_ntids) { // output node type id bool is_scalar_efeat = vec_efeat.size() != 0; bool use_efeat = op != "copy_lhs"; // TODO(Israt): Resolve PR-https://github.com/dmlc/dgl/issues/2995 and use multistream auto device = runtime::DeviceAPI::Get(vec_csr[0].indptr->ctx); SWITCH_BITS(bits, DType, { std::vector<DType*> trans_out(vec_out.size(), NULL); bool use_legacy_cusparsemm = (CUDART_VERSION < 11000) && // legacy cuSPARSE does not care about NNZ, hence the argument "false". 
((op == "copy_lhs" && cusparse_available<bits, IdType>(false)) || (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(false))); // Create temporary output buffer to store non-transposed output if (use_legacy_cusparsemm) { for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) { const int m = vec_out[ntype]->shape[0]; const int n = vec_out[ntype]->shape[1]; if (m == 0) continue; DType *out = static_cast<DType*>(device->AllocWorkspace(vec_csr[0].indptr->ctx, m * n * sizeof(DType))); CUDA_CALL(cudaMemset(out, 0, m * n * sizeof(DType))); trans_out[ntype] = out; } } // Check shape of ufeat for all relation type and compute feature size int64_t x_length = 1; for (dgl_type_t etype = 0; etype < (ufeat_ntids.size() - 1); ++etype) { NDArray ufeat = vec_ufeat[ufeat_ntids[etype]]; NDArray next_ufeat = vec_ufeat[ufeat_ntids[etype + 1]]; CHECK_EQ(ufeat->ndim, next_ufeat->ndim) << "Input features have different shapes"; for (int i = 1; i < ufeat->ndim; ++i) { if (ufeat->shape[i] != next_ufeat->shape[i]) { if (ufeat->shape[i] == 1 || next_ufeat->shape[i] == 1) LOG(FATAL) << "Homogenized message passing on heterogeneous graphs does not support " << "automatic broadcasting. Please manually broadcast it before calling " << "message passing functions."; else LOG(FATAL) << "Input features have different shapes."; return; } if (etype == 0) x_length *= ufeat->shape[i]; } } auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); for (dgl_type_t etype = 0; etype < ufeat_ntids.size(); ++etype) { const dgl_type_t src_id = ufeat_ntids[etype]; const dgl_type_t dst_id = out_ntids[etype]; CSRMatrix csr = vec_csr[etype]; if (reduce == "sum") { bool more_nnz = (csr.indices->shape[0] > csr.num_rows * csr.num_cols); /* Call SpMM for each relation type */ if (op == "copy_lhs" && cusparse_available<bits, IdType>(more_nnz)) { // cusparse /* If CUDA is less than 11.0, put the output in trans_out for later transposition */ DType *out = (CUDART_VERSION < 11000) ? 
trans_out[dst_id] : static_cast<DType*>(vec_out[dst_id]->data); cusparse::CusparseCsrmm2Hetero<DType, IdType>( csr.indptr->ctx, csr, static_cast<DType*>(vec_ufeat[src_id]->data), nullptr, out, x_length, thr_entry->stream); } else if (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(more_nnz)) { // cusparse NDArray efeat = vec_efeat[etype]; if (!IsNullArray(csr.data)) efeat = _IndexSelect<DType, IdType>(vec_efeat[etype], csr.data); cusparse::CusparseCsrmm2Hetero<DType, IdType>( csr.indptr->ctx, csr, static_cast<DType*>(vec_ufeat[src_id]->data), static_cast<DType*>(efeat->data), // TODO(Israt): Change vec_out to trans_out to support CUDA version < 11 static_cast<DType*>(vec_out[dst_id]->data), x_length, thr_entry->stream); } else { // general kernel NDArray ufeat = (vec_ufeat.size() == 0) ? NullArray() : vec_ufeat[src_id]; NDArray efeat = (vec_efeat.size() == 0) ? NullArray() : vec_efeat[etype]; SWITCH_OP(op, Op, { cuda::SpMMCsrHetero<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >( bcast, csr, ufeat, efeat, vec_out[dst_id], NullArray(), NullArray(), thr_entry->stream); }); } } else if (reduce == "max") { // SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { NDArray ufeat = (vec_ufeat.size() == 0) ? NullArray() : vec_ufeat[src_id]; NDArray efeat = (vec_efeat.size() == 0) ? NullArray() : vec_efeat[etype]; cuda::SpMMCsrHetero<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >( bcast, csr, ufeat, efeat, vec_out[dst_id], out_aux[0], out_aux[1], thr_entry->stream); }); // }); } else if (reduce == "min") { // SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { NDArray ufeat = (vec_ufeat.size() == 0) ? NullArray() : vec_ufeat[src_id]; NDArray efeat = (vec_efeat.size() == 0) ? 
NullArray() : vec_efeat[etype]; cuda::SpMMCsrHetero<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >( bcast, csr, ufeat, efeat, vec_out[dst_id], out_aux[0], out_aux[1], thr_entry->stream); // }); }); } else { LOG(FATAL) << "Not implemented"; } } if (use_legacy_cusparsemm) { // transpose output for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) { const int m = vec_out[ntype]->shape[0]; const int n = vec_out[ntype]->shape[1]; if (m == 0) continue; DType *C_data = static_cast<DType*>(vec_out[ntype]->data); _Transpose(trans_out[ntype], C_data, n, m); device->FreeWorkspace(vec_csr[0].indptr->ctx, trans_out[ntype]); } } }); } /*! * \brief CUDA implementation of g-SpMM on Coo format. */ template <int XPU, typename IdType, int bits> void SpMMCoo(const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { if (reduce == "sum") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Sum<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, NullArray(), NullArray()); }); }); } else if (reduce == "max") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Max<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else if (reduce == "min") { SWITCH_BITS(bits, DType, { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Min<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); }); } else { LOG(FATAL) << "Not implemented"; } } template void SpMMCsr<kDLGPU, int32_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray 
efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int32_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int32_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsrHetero<kDLGPU, int32_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int64_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int32_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const 
std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int64_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int32_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCsrHetero<kDLGPU, int64_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const std::vector<CSRMatrix>& csr, const std::vector<NDArray>& ufeat, const std::vector<NDArray>& efeat, std::vector<NDArray> out, const std::vector<NDArray>& out_aux, const std::vector<dgl_type_t>& ufeat_ntids, const std::vector<dgl_type_t>& out_ntids); template void SpMMCoo<kDLGPU, int32_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, 16>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, 32>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, 
std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, 64>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); } // namespace aten } // namespace dgl
d01f0c6c993f3500c4d14a1196c294e65428ad43.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * Modifications Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved. * * See LICENSE.txt for license information ************************************************************************/ #include "hip/hip_runtime.h" #include "common.h" #include <pthread.h> #include <cstdio> #include <getopt.h> #include <libgen.h> #if NCCL_MAJOR >= 2 ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble}; const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double"}; #else ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64}; const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"}; #endif ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin}; const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"}; const char *test_memorytypes[nccl_NUM_MTYPES] = {"coarse", "fine", "host"}; thread_local int is_main_thread = 0; // Command line parameter defaults static int nThreads = 1; static int nGpus = 1; static size_t minBytes = 32*1024*1024; static size_t maxBytes = 32*1024*1024; static size_t stepBytes = 1*1024*1024; static size_t stepFactor = 1; static int datacheck = 1; static int warmup_iters = 5; static int iters = 20; static int agg_iters = 1; static int ncclop = ncclSum; static int nccltype = ncclFloat; static int ncclroot = 0; static int parallel_init = 0; static int blocking_coll = 0; static int memorytype = 0; double parsesize(char *value) { long long int units; double size; if (strchr(value, 'G') != NULL) { units=1024*1024*1024; } else if (strchr(value, 'M') != NULL) { 
units=1024*1024; } else if (strchr(value, 'K') != NULL) { units=1024; } else { units=1; } size = atof(value)*units; return size; } double DeltaMaxValue(ncclDataType_t type) { switch(type) { case ncclHalf: return 1e-2; case ncclFloat: return 1e-5; case ncclDouble: return 1e-12; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint8: //case ncclInt32: case ncclUint32: #endif case ncclInt64: case ncclUint64: return 1e-200; } return 1e-200; } template<typename T> __device__ double absDiff(T a, T b) { return fabs((double)(b - a)); } template<> __device__ double absDiff<half>(half a, half b) { float x = __half2float(a); float y = __half2float(b); return fabs((double)(y-x)); } template<typename T> __device__ float toFloat(T a) { return (float)a; } template<> __device__ float toFloat(half a) { return __half2float(a); } template<typename T, int BSIZE> __global__ void deltaKern(void* A_, void* B_, size_t count, double* max) { const T* A = (const T*)A_; const T* B = (const T*)B_; __shared__ double temp[BSIZE]; int tid = threadIdx.x; double locmax = 0.0; for(int i=tid; i<count; i+=blockDim.x) { double delta = absDiff(A[i], B[i]); if( delta > locmax ) { locmax = delta; #ifdef DEBUG_PRINT if (delta > .1) printf("Error at %d/%ld : %f != %f\n", i, count, toFloat(A[i]), toFloat(B[i])); #endif } } temp[tid] = locmax; for(int stride = BSIZE/2; stride > 1; stride>>=1) { __syncthreads(); if( tid < stride ) temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride]; } __syncthreads(); if( threadIdx.x == 0) *max = temp[0] > temp[1] ? 
temp[0] : temp[1]; } testResult_t CheckDelta(void* expected, void* results, size_t count, ncclDataType_t type, double* devmax) { switch (type) { case ncclHalf: hipLaunchKernelGGL((deltaKern<half, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclFloat: hipLaunchKernelGGL((deltaKern<float, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclDouble: hipLaunchKernelGGL((deltaKern<double, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclChar: #if NCCL_MAJOR >= 2 case ncclUint8: #endif hipLaunchKernelGGL((deltaKern<uint8_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint32: #endif hipLaunchKernelGGL((deltaKern<uint32_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclInt64: case ncclUint64: hipLaunchKernelGGL((deltaKern<uint64_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; } HIPCHECK(hipDeviceSynchronize()); return testSuccess; } // For integer values, we use values between 0 and 255 template<typename T> __device__ T testValue(const size_t offset, const int rep, const int rank) { uint8_t v = (rep+rank+offset) % 256; return (T)v; } // For floating point datatype, we use values between 0 and 1 otherwise the // Product operation will produce NaNs. 
template<> __device__ double testValue<double>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(double)testValue<int>(offset, rep, rank)); } template<> __device__ float testValue<float>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(float)testValue<int>(offset, rep, rank)); } template<> __device__ half testValue<half>(const size_t offset, const int rep, const int rank) { return __float2half(testValue<float>(offset, rep, rank)); } // Operations template<typename T> __device__ T ncclOpSum(T a, T b) { return a+b; } template<typename T> __device__ T ncclOpProd(T a, T b) { return a*b; } template<typename T> __device__ T ncclOpMax(T a, T b) { return a>b ? a : b; } template<typename T> __device__ T ncclOpMin(T a, T b) { return a<b ? a : b; } // Definitions for half template<> __device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); } template<> __device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); } template<> __device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; } template<> __device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? 
a : b; } template<typename T, T (*Op)(T, T)> __global__ void InitDataReduceKernel(void* data, const size_t N, const size_t offset, const int rep, const int nranks) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) { T val = testValue<T>(o+offset, rep, 0); for (int i=1; i<nranks; i++) { val = Op(val, testValue<T>(o+offset, rep, i)); } ((T*)data)[o] = val; } } typedef void(*redInitKern_t)(void* data, const size_t N, const size_t offset, const int rep, const int nranks); #define KERN(type, op) InitDataReduceKernel<type, op<type>> #define OPS(type) KERN(type, ncclOpSum), KERN(type, ncclOpProd), KERN(type, ncclOpMax), KERN(type, ncclOpMin) static redInitKern_t const redInitDataKerns[ncclNumOps*ncclNumTypes] = { #if NCCL_MAJOR >= 2 OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double) #else OPS(char), OPS(int32_t), OPS(half), OPS(float), OPS(double), OPS(int64_t), OPS(uint64_t) #endif }; testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; hipLaunchKernelGGL((redInitDataKerns[type*ncclNumOps+op]), grid, block, 0, 0, data, count, offset, rep, nranks); return testSuccess; } template<typename T> __global__ void InitDataKernel(void* data, const size_t N, const int rep, const int rank) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) ((T*)data)[o] = testValue<T>(o, rep, rank); } typedef void(*initDataKern_t)(void* data, const size_t N, const int rep, const int rank); static initDataKern_t const initDataKerns[ncclNumTypes] = { #if NCCL_MAJOR >= 2 InitDataKernel< int8_t>, InitDataKernel< uint8_t>, InitDataKernel< int32_t>, InitDataKernel<uint32_t>, InitDataKernel< int64_t>, InitDataKernel<uint64_t>, InitDataKernel< half>, InitDataKernel< float>, InitDataKernel< double> #else InitDataKernel< char>, 
InitDataKernel< int32_t>, InitDataKernel< half>, InitDataKernel< float>, InitDataKernel< double>, InitDataKernel< int64_t>, InitDataKernel<uint64_t>, #endif }; template<typename T> testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) { T* ptr = (T*)dest; hipLaunchKernelGGL((InitDataKernel), dim3(16), dim3(512), 0, 0, ptr, N, rep, rank); return testSuccess; } testResult_t InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; hipLaunchKernelGGL((initDataKerns[type]), grid, block, 0, 0, data, count, rep, rank); return testSuccess; } void Barrier(struct threadArgs* args) { while (args->barrier[args->barrier_idx] != args->thread) pthread_yield(); args->barrier[args->barrier_idx] = args->thread + 1; if (args->thread+1 == args->nThreads) { #ifdef MPI_SUPPORT MPI_Barrier(MPI_COMM_WORLD); #endif args->barrier[args->barrier_idx] = 0; } else { while (args->barrier[args->barrier_idx]) pthread_yield(); } args->barrier_idx=!args->barrier_idx; } testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta, bool *error) { size_t count = args->expectedBytes/wordSize(type); double maxDelta = 0.0; for (int i=0; i<args->nGpus; i++) { int device; int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); NCCLCHECK(ncclCommCuDevice(args->comms[i], &device)); HIPCHECK(hipSetDevice(device)); void *data = in_place ? 
((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i]; TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->delta)); maxDelta = ::max(*(args->deltaHost), maxDelta); #ifdef DEBUG_PRINT if (rank == 0) { int *expectedHost = (int *)malloc(args->expectedBytes); int *dataHost = (int *)malloc(args->expectedBytes); hipMemcpy(expectedHost, args->expected[0], args->expectedBytes, hipMemcpyDeviceToHost); printf("\n Expected: "); for(int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, expectedHost[j]); } printf("\n"); hipMemcpy(dataHost, data, args->expectedBytes, hipMemcpyDeviceToHost); printf("\n Actual: "); for (int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, dataHost[j]); } printf("\n"); free(temp); } #endif } double nranks = args->nProcs*args->nThreads*args->nGpus; if (maxDelta > DeltaMaxValue(type)*(nranks - 1)) { args->errors[0]++; *error = true; } *delta = maxDelta; return testSuccess; } testResult_t testStreamSynchronize(int ngpus, hipStream_t* streams, ncclComm_t* comms) { hipError_t hipErr; int remaining = ngpus; int* done = (int*)malloc(sizeof(int)*ngpus); memset(done, 0, sizeof(int)*ngpus); while (remaining) { int idle = 1; for (int i=0; i<ngpus; i++) { if (done[i]) continue; hipErr = hipStreamQuery(streams[i]); if (hipErr == hipSuccess) { done[i] = 1; remaining--; idle = 0; continue; } if (hipErr != hipErrorNotReady) HIPCHECK(hipErr); #if NCCL_MAJOR >= 2 #if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0) if (comms) { ncclResult_t ncclAsyncErr; NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr)); if (ncclAsyncErr != ncclSuccess) { // An asynchronous error happened. Stop the operation and destroy // the communicator for (int i=0; i<ngpus; i++) NCCLCHECK(ncclCommAbort(comms[i])); // Abort the perf test NCCLCHECK(ncclAsyncErr); } } #endif #endif } // We might want to let other threads (including NCCL threads) use the CPU. 
if (idle) pthread_yield(); } return testSuccess; } testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) { size_t count = args->nbytes / wordSize(type); // Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange size_t totalnbytes = max(args->sendBytes, args->expectedBytes); size_t shift = (totalnbytes * iter) % args->maxbytes; if (shift + totalnbytes > args->maxbytes) shift = 0; if (args->nGpus > 1) NCCLCHECK(ncclGroupStart()); for (int i = 0; i < args->nGpus; i++) { #ifndef NCCL_MAJOR int hipDev; NCCLCHECK(ncclCommCuDevice(args->comms[i], &hipDev)); HIPCHECK(hipSetDevice(hipDev)); #endif int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); char* recvBuff = ((char*)args->recvbuffs[i]) + shift; char* sendBuff = ((char*)args->sendbuffs[i]) + shift; TESTCHECK(args->collTest->runColl( (void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff), (void*)(in_place ? 
recvBuff + args->recvInplaceOffset*rank : recvBuff), count, type, op, root, args->comms[i], args->streams[i])); } if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd()); if (blocking_coll) { // Complete op before returning TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); } if (blocking_coll) Barrier(args); return testSuccess; } testResult_t completeColl(struct threadArgs* args) { if (blocking_coll) return testSuccess; TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); return testSuccess; } testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) { size_t count = args->nbytes / wordSize(type); // Sync TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); Barrier(args); // Performance Benchmark auto start = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < iters; iter++) { if (agg_iters>1) NCCLCHECK(ncclGroupStart()); for (int aiter = 0; aiter < agg_iters; aiter++) { TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter)); } if (agg_iters>1) NCCLCHECK(ncclGroupEnd()); } TESTCHECK(completeColl(args)); auto delta = std::chrono::high_resolution_clock::now() - start; double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count(); deltaSec = deltaSec/(iters*agg_iters); double algBw, busBw; args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus); Barrier(args); double maxDelta = 0; bool error = false; static __thread int rep = 0; rep++; if (datacheck) { // Initialize sendbuffs, recvbuffs and expected TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place)); //test validation in single itertion, should ideally be included into the multi-iteration run TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta, &error)); 
//aggregate delta from all threads and procs Barrier(args); if (args->thread == 0) { for (int i=1; i<args->nThreads; i++) { maxDelta += args->deltaThreads[i]; } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &maxDelta, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_C_BOOL, MPI_LOR, MPI_COMM_WORLD); #endif } Barrier(args); } double timeUsec = deltaSec*1.0E6; char timeStr[10]; if (timeUsec > 10000.0) { sprintf(timeStr, "%7.0f", timeUsec); } else if (timeUsec > 100.0) { sprintf(timeStr, "%7.1f", timeUsec); } else { sprintf(timeStr, "%7.2f", timeUsec); } if (datacheck) { PRINT(" %7s %6.2f %6.2f %5.0le%s", timeStr, algBw, busBw, maxDelta, error ? "*" : ""); } else { PRINT(" %7s %6.2f %6.2f %5s", timeStr, algBw, busBw, "N/A"); } args->bw[0] += busBw; args->bw_count[0]++; return testSuccess; } void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) { int nranks = args->nProcs*args->nGpus*args->nThreads; size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset; count = size / wordSize(type); args->collTest->getCollByteCount(&sendCount, &recvCount, &paramCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks); args->nbytes = paramCount * wordSize(type); args->sendBytes = sendCount * wordSize(type); args->expectedBytes = recvCount * wordSize(type); args->sendInplaceOffset = sendInplaceOffset * wordSize(type); args->recvInplaceOffset = recvInplaceOffset * wordSize(type); } testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) { // Warm-up for large size setupArgs(args->maxbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } TESTCHECK(completeColl(args)); // Warm-up for small size setupArgs(args->minbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } 
TESTCHECK(completeColl(args)); // Benchmark for (size_t size = args->minbytes; size<=args->maxbytes; size = ((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) { setupArgs(size, type, args); print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root); TESTCHECK(BenchTime(args, type, op, root, 0)); TESTCHECK(BenchTime(args, type, op, root, 1)); PRINT("\n"); } return testSuccess; } testResult_t threadRunTests(struct threadArgs* args) { // Set device to the first of our GPUs. If we don't do that, some operations // will be done on the current GPU (by default : 0) and if the GPUs are in // exclusive mode those operations will fail. int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus; HIPCHECK(hipSetDevice(gpuid)); TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop])); return testSuccess; } testResult_t threadInit(struct threadArgs* args) { char hostname[1024]; getHostName(hostname, 1024); int nranks = args->nProcs*args->nThreads*args->nGpus; //set main thread again is_main_thread = (args->proc == 0 && args->thread == 0) ? 
1 : 0; NCCLCHECK(ncclGroupStart()); for (int i=0; i<args->nGpus; i++) { int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i; int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i; HIPCHECK(hipSetDevice(gpuid)); NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank)); } NCCLCHECK(ncclGroupEnd()); TESTCHECK(threadRunTests(args)); for (int i=0; i<args->nGpus; i++) { #if NCCL_MAJOR >= 2 NCCLCHECK(ncclCommDestroy(args->comms[i])); #else ncclCommDestroy(args->comms[i]); #endif } return testSuccess; } void* threadLauncher(void* thread_) { struct testThread* thread = (struct testThread*)thread_; thread->ret = thread->func(&thread->args); return NULL; } testResult_t threadLaunch(struct testThread* thread) { pthread_create(&thread->thread, NULL, threadLauncher, thread); return testSuccess; } testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) { if (memorytype == ncclFine) { HIPCHECK(hipExtMallocWithFlags(sendbuff, nbytes, hipDeviceMallocFinegrained)); HIPCHECK(hipExtMallocWithFlags(recvbuff, nbytes, hipDeviceMallocFinegrained)); HIPCHECK(hipExtMallocWithFlags(expected, recvBytes, hipDeviceMallocFinegrained)); } else if (memorytype == ncclHost) { HIPCHECK(hipHostMalloc(sendbuff, nbytes)); HIPCHECK(hipHostMalloc(recvbuff, nbytes)); HIPCHECK(hipHostMalloc(expected, recvBytes)); } else { HIPCHECK(hipMalloc(sendbuff, nbytes)); HIPCHECK(hipMalloc(recvbuff, nbytes)); HIPCHECK(hipMalloc(expected, recvBytes)); } return testSuccess; } testResult_t run(); // Main function int main(int argc, char* argv[]) { // Make sure everyline is flushed so that we see the progress of the test setlinebuf(stdout); // Parse args int longindex; static struct option longopts[] = { {"nthreads", required_argument, 0, 't'}, {"ngpus", required_argument, 0, 'g'}, {"minbytes", required_argument, 0, 'b'}, {"maxbytes", required_argument, 0, 'e'}, 
{"stepbytes", required_argument, 0, 'i'}, {"stepfactor", required_argument, 0, 'f'}, {"iters", required_argument, 0, 'n'}, {"agg_iters", required_argument, 0, 'm'}, {"warmup_iters", required_argument, 0, 'w'}, {"parallel_init", required_argument, 0, 'p'}, {"check", required_argument, 0, 'c'}, {"op", required_argument, 0, 'o'}, {"datatype", required_argument, 0, 'd'}, {"root", required_argument, 0, 'r'}, {"blocking", required_argument, 0, 'z'}, {"memory_type", required_argument, 0, 'y'}, {"help", no_argument, 0, 'h'} }; while(1) { int c; c = getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:y:h", longopts, &longindex); if (c == -1) break; switch(c) { case 't': nThreads = strtol(optarg, NULL, 0); break; case 'g': nGpus = strtol(optarg, NULL, 0); break; case 'b': minBytes = (size_t)parsesize(optarg); break; case 'e': maxBytes = (size_t)parsesize(optarg); break; case 'i': stepBytes = strtol(optarg, NULL, 0); break; case 'f': stepFactor = strtol(optarg, NULL, 0); break; case 'n': iters = (int)strtol(optarg, NULL, 0); break; case 'm': #if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2 agg_iters = (int)strtol(optarg, NULL, 0); #else printf("Option -m not supported before NCCL 2.2. 
Ignoring\n"); #endif break; case 'w': warmup_iters = (int)strtol(optarg, NULL, 0); break; case 'c': datacheck = (int)strtol(optarg, NULL, 0); break; case 'p': parallel_init = (int)strtol(optarg, NULL, 0); break; case 'o': ncclop = ncclstringtoop(optarg); break; case 'd': nccltype = ncclstringtotype(optarg); break; case 'r': ncclroot = strtol(optarg, NULL, 0); break; case 'z': blocking_coll = strtol(optarg, NULL, 0); break; case 'y': memorytype = ncclstringtomtype(optarg); break; case 'h': printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" "[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-y,--memory_type <coarse/fine/host>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; default: printf("invalid option \n"); printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" "[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-y,--memory_type <coarse/fine/host>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; } } #ifdef MPI_SUPPORT MPI_Init(&argc, &argv); #endif return run(); } 
testResult_t run() { int nProcs = 1, proc = 0; int localRank = 0; char hostname[1024]; getHostName(hostname, 1024); #ifdef MPI_SUPPORT MPI_Comm_size(MPI_COMM_WORLD, &nProcs); MPI_Comm_rank(MPI_COMM_WORLD, &proc); uint64_t hostHashs[nProcs]; hostHashs[proc] = getHostHash(hostname); MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD); for (int p=0; p<nProcs; p++) { if (p == proc) break; if (hostHashs[p] == hostHashs[proc]) localRank++; } #endif is_main_thread = (proc == 0) ? 1 : 0; PRINT("# nThread: %d nGpus: %d minBytes: %ld maxBytes: %ld step: %ld(%s) warmupIters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes, (stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck); if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n"); if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n"); PRINT("#\n"); PRINT("# Using devices\n"); #define MAX_LINE 2048 char line[MAX_LINE]; int len = 0; for (int i=0; i<nThreads*nGpus; i++) { int hipDev = localRank*nThreads*nGpus+i; int rank = proc*nThreads*nGpus+i; hipDeviceProp_t prop; HIPCHECK(hipGetDeviceProperties(&prop, hipDev)); len += snprintf(line+len, MAX_LINE-len, "# Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n", rank, getpid(), hostname, hipDev, prop.pciBusID, prop.name); } #if MPI_SUPPORT char *lines = (proc == 0) ? 
(char *)malloc(nProcs*MAX_LINE) : NULL; // Gather all output in rank order to root (0) MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD); if (proc == 0) { for (int p = 0; p < nProcs; p++) PRINT("%s", lines+MAX_LINE*p); free(lines); } #else PRINT("%s", line); #endif ncclUniqueId ncclId; if (proc == 0) { NCCLCHECK(ncclGetUniqueId(&ncclId)); } #ifdef MPI_SUPPORT MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD); #endif hipStream_t streams[nGpus*nThreads]; void* sendbuffs[nGpus*nThreads]; void* recvbuffs[nGpus*nThreads]; void* expected[nGpus*nThreads]; size_t sendBytes, recvBytes; ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads); for (int i=0; i<nGpus*nThreads; i++) { HIPCHECK(hipSetDevice(localRank*nThreads*nGpus+i)); AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus); HIPCHECK(hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking)); } //if parallel init is not selected, use main thread to initialize NCCL ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus); if (!parallel_init) { if (nProcs == 1) { int gpuArray[nGpus*nThreads]; for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i; NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray)); } else { NCCLCHECK(ncclGroupStart()); for (int i=0; i<nGpus*nThreads; i++) { HIPCHECK(hipSetDevice(localRank*nThreads*nGpus+i)); NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i)); } NCCLCHECK(ncclGroupEnd()); } } int errors[nThreads]; double bw[nThreads]; double* delta; HIPCHECK(hipHostMalloc(&delta, sizeof(double)*nThreads, hipHostMallocPortable | hipHostMallocMapped)); int bw_count[nThreads]; for (int t=0; t<nThreads; t++) { bw[t] = 0.0; errors[t] = bw_count[t] = 0; } PRINT("#\n"); print_header(); int* sync = (int*)calloc(2, sizeof(int)); int* barrier = (int*)calloc(2, sizeof(int)); struct testThread 
threads[nThreads]; memset(threads, 0, sizeof(struct testThread)*nThreads); for (int t=nThreads-1; t>=0; t--) { threads[t].args.minbytes=minBytes; threads[t].args.maxbytes=maxBytes; threads[t].args.stepbytes=stepBytes; threads[t].args.stepfactor=stepFactor; threads[t].args.localRank = localRank; threads[t].args.nProcs=nProcs; threads[t].args.proc=proc; threads[t].args.nThreads=nThreads; threads[t].args.thread=t; threads[t].args.nGpus=nGpus; threads[t].args.sendbuffs = sendbuffs+t*nGpus; threads[t].args.recvbuffs = recvbuffs+t*nGpus; threads[t].args.expected = expected+t*nGpus; threads[t].args.ncclId = ncclId; threads[t].args.comms=comms+t*nGpus; threads[t].args.streams=streams+t*nGpus; threads[t].args.barrier = (volatile int*)barrier; threads[t].args.barrier_idx = 0; threads[t].args.sync = (volatile int*)sync; threads[t].args.sync_idx = 0; threads[t].args.deltaThreads = delta; threads[t].args.deltaHost = (delta + t); threads[t].args.delta = delta; threads[t].args.errors=errors+t; threads[t].args.bw=bw+t; threads[t].args.bw_count=bw_count+t; threads[t].func = parallel_init ? 
threadInit : threadRunTests; if (t) TESTCHECK(threadLaunch(threads+t)); else TESTCHECK(threads[t].func(&threads[t].args)); } // Wait for other threads and accumulate stats and errors for (int t=nThreads-1; t>=0; t--) { if (t) pthread_join(threads[t].thread, NULL); TESTCHECK(threads[t].ret); if (t) { errors[0] += errors[t]; bw[0] += bw[t]; bw_count[0] += bw_count[t]; } } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); #endif if (!parallel_init) { for(int i=0; i<nGpus*nThreads; ++i) #if NCCL_MAJOR >= 2 NCCLCHECK(ncclCommDestroy(comms[i])); #else ncclCommDestroy(comms[i]); #endif free(comms); } // Free off HIP allocated memory for (int i=0; i<nGpus*nThreads; i++) { if (memorytype == ncclHost) { HIPCHECK(hipHostFree(sendbuffs[i])); HIPCHECK(hipHostFree(recvbuffs[i])); HIPCHECK(hipHostFree(expected[i])); } else { HIPCHECK(hipFree(sendbuffs[i])); HIPCHECK(hipFree(recvbuffs[i])); HIPCHECK(hipFree(expected[i])); } } HIPCHECK(hipHostFree(delta)); char* str = getenv("NCCL_TESTS_MIN_BW"); double check_avg_bw = str ? atof(str) : -1; bw[0] /= bw_count[0]; if (datacheck) PRINT("# Errors with asterisks indicate errors that have exceeded the maximum threshold.\n"); PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK"); PRINT("# Avg bus bandwidth : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK")); PRINT("#\n"); #ifdef MPI_SUPPORT MPI_Finalize(); #endif // 'hip-memcheck --leak-check full' requires this hipDeviceReset(); if (errors[0] || bw[0] < check_avg_bw*(0.9)) exit(EXIT_FAILURE); else exit(EXIT_SUCCESS); }
d01f0c6c993f3500c4d14a1196c294e65428ad43.cu
/************************************************************************* * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * Modifications Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved. * * See LICENSE.txt for license information ************************************************************************/ #include "hip/hip_runtime.h" #include "common.h" #include <pthread.h> #include <cstdio> #include <getopt.h> #include <libgen.h> #if NCCL_MAJOR >= 2 ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble}; const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double"}; #else ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64}; const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"}; #endif ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin}; const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"}; const char *test_memorytypes[nccl_NUM_MTYPES] = {"coarse", "fine", "host"}; thread_local int is_main_thread = 0; // Command line parameter defaults static int nThreads = 1; static int nGpus = 1; static size_t minBytes = 32*1024*1024; static size_t maxBytes = 32*1024*1024; static size_t stepBytes = 1*1024*1024; static size_t stepFactor = 1; static int datacheck = 1; static int warmup_iters = 5; static int iters = 20; static int agg_iters = 1; static int ncclop = ncclSum; static int nccltype = ncclFloat; static int ncclroot = 0; static int parallel_init = 0; static int blocking_coll = 0; static int memorytype = 0; double parsesize(char *value) { long long int units; double size; if (strchr(value, 'G') != NULL) { units=1024*1024*1024; } else if (strchr(value, 'M') != NULL) { units=1024*1024; } else if (strchr(value, 'K') != NULL) { units=1024; 
} else { units=1; } size = atof(value)*units; return size; } double DeltaMaxValue(ncclDataType_t type) { switch(type) { case ncclHalf: return 1e-2; case ncclFloat: return 1e-5; case ncclDouble: return 1e-12; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint8: //case ncclInt32: case ncclUint32: #endif case ncclInt64: case ncclUint64: return 1e-200; } return 1e-200; } template<typename T> __device__ double absDiff(T a, T b) { return fabs((double)(b - a)); } template<> __device__ double absDiff<half>(half a, half b) { float x = __half2float(a); float y = __half2float(b); return fabs((double)(y-x)); } template<typename T> __device__ float toFloat(T a) { return (float)a; } template<> __device__ float toFloat(half a) { return __half2float(a); } template<typename T, int BSIZE> __global__ void deltaKern(void* A_, void* B_, size_t count, double* max) { const T* A = (const T*)A_; const T* B = (const T*)B_; __shared__ double temp[BSIZE]; int tid = threadIdx.x; double locmax = 0.0; for(int i=tid; i<count; i+=blockDim.x) { double delta = absDiff(A[i], B[i]); if( delta > locmax ) { locmax = delta; #ifdef DEBUG_PRINT if (delta > .1) printf("Error at %d/%ld : %f != %f\n", i, count, toFloat(A[i]), toFloat(B[i])); #endif } } temp[tid] = locmax; for(int stride = BSIZE/2; stride > 1; stride>>=1) { __syncthreads(); if( tid < stride ) temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride]; } __syncthreads(); if( threadIdx.x == 0) *max = temp[0] > temp[1] ? 
temp[0] : temp[1]; } testResult_t CheckDelta(void* expected, void* results, size_t count, ncclDataType_t type, double* devmax) { switch (type) { case ncclHalf: hipLaunchKernelGGL((deltaKern<half, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclFloat: hipLaunchKernelGGL((deltaKern<float, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclDouble: hipLaunchKernelGGL((deltaKern<double, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclChar: #if NCCL_MAJOR >= 2 case ncclUint8: #endif hipLaunchKernelGGL((deltaKern<uint8_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint32: #endif hipLaunchKernelGGL((deltaKern<uint32_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclInt64: case ncclUint64: hipLaunchKernelGGL((deltaKern<uint64_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; } HIPCHECK(hipDeviceSynchronize()); return testSuccess; } // For integer values, we use values between 0 and 255 template<typename T> __device__ T testValue(const size_t offset, const int rep, const int rank) { uint8_t v = (rep+rank+offset) % 256; return (T)v; } // For floating point datatype, we use values between 0 and 1 otherwise the // Product operation will produce NaNs. 
template<> __device__ double testValue<double>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(double)testValue<int>(offset, rep, rank)); } template<> __device__ float testValue<float>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(float)testValue<int>(offset, rep, rank)); } template<> __device__ half testValue<half>(const size_t offset, const int rep, const int rank) { return __float2half(testValue<float>(offset, rep, rank)); } // Operations template<typename T> __device__ T ncclOpSum(T a, T b) { return a+b; } template<typename T> __device__ T ncclOpProd(T a, T b) { return a*b; } template<typename T> __device__ T ncclOpMax(T a, T b) { return a>b ? a : b; } template<typename T> __device__ T ncclOpMin(T a, T b) { return a<b ? a : b; } // Definitions for half template<> __device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); } template<> __device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); } template<> __device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; } template<> __device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? 
a : b; } template<typename T, T (*Op)(T, T)> __global__ void InitDataReduceKernel(void* data, const size_t N, const size_t offset, const int rep, const int nranks) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) { T val = testValue<T>(o+offset, rep, 0); for (int i=1; i<nranks; i++) { val = Op(val, testValue<T>(o+offset, rep, i)); } ((T*)data)[o] = val; } } typedef void(*redInitKern_t)(void* data, const size_t N, const size_t offset, const int rep, const int nranks); #define KERN(type, op) InitDataReduceKernel<type, op<type>> #define OPS(type) KERN(type, ncclOpSum), KERN(type, ncclOpProd), KERN(type, ncclOpMax), KERN(type, ncclOpMin) static redInitKern_t const redInitDataKerns[ncclNumOps*ncclNumTypes] = { #if NCCL_MAJOR >= 2 OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double) #else OPS(char), OPS(int32_t), OPS(half), OPS(float), OPS(double), OPS(int64_t), OPS(uint64_t) #endif }; testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; hipLaunchKernelGGL((redInitDataKerns[type*ncclNumOps+op]), grid, block, 0, 0, data, count, offset, rep, nranks); return testSuccess; } template<typename T> __global__ void InitDataKernel(void* data, const size_t N, const int rep, const int rank) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) ((T*)data)[o] = testValue<T>(o, rep, rank); } typedef void(*initDataKern_t)(void* data, const size_t N, const int rep, const int rank); static initDataKern_t const initDataKerns[ncclNumTypes] = { #if NCCL_MAJOR >= 2 InitDataKernel< int8_t>, InitDataKernel< uint8_t>, InitDataKernel< int32_t>, InitDataKernel<uint32_t>, InitDataKernel< int64_t>, InitDataKernel<uint64_t>, InitDataKernel< half>, InitDataKernel< float>, InitDataKernel< double> #else InitDataKernel< char>, 
InitDataKernel< int32_t>, InitDataKernel< half>, InitDataKernel< float>, InitDataKernel< double>, InitDataKernel< int64_t>, InitDataKernel<uint64_t>, #endif }; template<typename T> testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) { T* ptr = (T*)dest; hipLaunchKernelGGL((InitDataKernel), dim3(16), dim3(512), 0, 0, ptr, N, rep, rank); return testSuccess; } testResult_t InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; hipLaunchKernelGGL((initDataKerns[type]), grid, block, 0, 0, data, count, rep, rank); return testSuccess; } void Barrier(struct threadArgs* args) { while (args->barrier[args->barrier_idx] != args->thread) pthread_yield(); args->barrier[args->barrier_idx] = args->thread + 1; if (args->thread+1 == args->nThreads) { #ifdef MPI_SUPPORT MPI_Barrier(MPI_COMM_WORLD); #endif args->barrier[args->barrier_idx] = 0; } else { while (args->barrier[args->barrier_idx]) pthread_yield(); } args->barrier_idx=!args->barrier_idx; } testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta, bool *error) { size_t count = args->expectedBytes/wordSize(type); double maxDelta = 0.0; for (int i=0; i<args->nGpus; i++) { int device; int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); NCCLCHECK(ncclCommCuDevice(args->comms[i], &device)); HIPCHECK(hipSetDevice(device)); void *data = in_place ? 
((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i]; TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->delta)); maxDelta = std::max(*(args->deltaHost), maxDelta); #ifdef DEBUG_PRINT if (rank == 0) { int *expectedHost = (int *)malloc(args->expectedBytes); int *dataHost = (int *)malloc(args->expectedBytes); hipMemcpy(expectedHost, args->expected[0], args->expectedBytes, hipMemcpyDeviceToHost); printf("\n Expected: "); for(int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, expectedHost[j]); } printf("\n"); hipMemcpy(dataHost, data, args->expectedBytes, hipMemcpyDeviceToHost); printf("\n Actual: "); for (int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, dataHost[j]); } printf("\n"); free(temp); } #endif } double nranks = args->nProcs*args->nThreads*args->nGpus; if (maxDelta > DeltaMaxValue(type)*(nranks - 1)) { args->errors[0]++; *error = true; } *delta = maxDelta; return testSuccess; } testResult_t testStreamSynchronize(int ngpus, hipStream_t* streams, ncclComm_t* comms) { hipError_t hipErr; int remaining = ngpus; int* done = (int*)malloc(sizeof(int)*ngpus); memset(done, 0, sizeof(int)*ngpus); while (remaining) { int idle = 1; for (int i=0; i<ngpus; i++) { if (done[i]) continue; hipErr = hipStreamQuery(streams[i]); if (hipErr == hipSuccess) { done[i] = 1; remaining--; idle = 0; continue; } if (hipErr != hipErrorNotReady) HIPCHECK(hipErr); #if NCCL_MAJOR >= 2 #if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0) if (comms) { ncclResult_t ncclAsyncErr; NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr)); if (ncclAsyncErr != ncclSuccess) { // An asynchronous error happened. Stop the operation and destroy // the communicator for (int i=0; i<ngpus; i++) NCCLCHECK(ncclCommAbort(comms[i])); // Abort the perf test NCCLCHECK(ncclAsyncErr); } } #endif #endif } // We might want to let other threads (including NCCL threads) use the CPU. 
if (idle) pthread_yield(); } return testSuccess; } testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) { size_t count = args->nbytes / wordSize(type); // Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange size_t totalnbytes = max(args->sendBytes, args->expectedBytes); size_t shift = (totalnbytes * iter) % args->maxbytes; if (shift + totalnbytes > args->maxbytes) shift = 0; if (args->nGpus > 1) NCCLCHECK(ncclGroupStart()); for (int i = 0; i < args->nGpus; i++) { #ifndef NCCL_MAJOR int hipDev; NCCLCHECK(ncclCommCuDevice(args->comms[i], &hipDev)); HIPCHECK(hipSetDevice(hipDev)); #endif int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); char* recvBuff = ((char*)args->recvbuffs[i]) + shift; char* sendBuff = ((char*)args->sendbuffs[i]) + shift; TESTCHECK(args->collTest->runColl( (void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff), (void*)(in_place ? 
recvBuff + args->recvInplaceOffset*rank : recvBuff), count, type, op, root, args->comms[i], args->streams[i])); } if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd()); if (blocking_coll) { // Complete op before returning TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); } if (blocking_coll) Barrier(args); return testSuccess; } testResult_t completeColl(struct threadArgs* args) { if (blocking_coll) return testSuccess; TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); return testSuccess; } testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) { size_t count = args->nbytes / wordSize(type); // Sync TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); Barrier(args); // Performance Benchmark auto start = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < iters; iter++) { if (agg_iters>1) NCCLCHECK(ncclGroupStart()); for (int aiter = 0; aiter < agg_iters; aiter++) { TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter)); } if (agg_iters>1) NCCLCHECK(ncclGroupEnd()); } TESTCHECK(completeColl(args)); auto delta = std::chrono::high_resolution_clock::now() - start; double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count(); deltaSec = deltaSec/(iters*agg_iters); double algBw, busBw; args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus); Barrier(args); double maxDelta = 0; bool error = false; static __thread int rep = 0; rep++; if (datacheck) { // Initialize sendbuffs, recvbuffs and expected TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place)); //test validation in single itertion, should ideally be included into the multi-iteration run TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta, &error)); 
//aggregate delta from all threads and procs Barrier(args); if (args->thread == 0) { for (int i=1; i<args->nThreads; i++) { maxDelta += args->deltaThreads[i]; } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &maxDelta, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_C_BOOL, MPI_LOR, MPI_COMM_WORLD); #endif } Barrier(args); } double timeUsec = deltaSec*1.0E6; char timeStr[10]; if (timeUsec > 10000.0) { sprintf(timeStr, "%7.0f", timeUsec); } else if (timeUsec > 100.0) { sprintf(timeStr, "%7.1f", timeUsec); } else { sprintf(timeStr, "%7.2f", timeUsec); } if (datacheck) { PRINT(" %7s %6.2f %6.2f %5.0le%s", timeStr, algBw, busBw, maxDelta, error ? "*" : ""); } else { PRINT(" %7s %6.2f %6.2f %5s", timeStr, algBw, busBw, "N/A"); } args->bw[0] += busBw; args->bw_count[0]++; return testSuccess; } void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) { int nranks = args->nProcs*args->nGpus*args->nThreads; size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset; count = size / wordSize(type); args->collTest->getCollByteCount(&sendCount, &recvCount, &paramCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks); args->nbytes = paramCount * wordSize(type); args->sendBytes = sendCount * wordSize(type); args->expectedBytes = recvCount * wordSize(type); args->sendInplaceOffset = sendInplaceOffset * wordSize(type); args->recvInplaceOffset = recvInplaceOffset * wordSize(type); } testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) { // Warm-up for large size setupArgs(args->maxbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } TESTCHECK(completeColl(args)); // Warm-up for small size setupArgs(args->minbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } 
TESTCHECK(completeColl(args)); // Benchmark for (size_t size = args->minbytes; size<=args->maxbytes; size = ((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) { setupArgs(size, type, args); print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root); TESTCHECK(BenchTime(args, type, op, root, 0)); TESTCHECK(BenchTime(args, type, op, root, 1)); PRINT("\n"); } return testSuccess; } testResult_t threadRunTests(struct threadArgs* args) { // Set device to the first of our GPUs. If we don't do that, some operations // will be done on the current GPU (by default : 0) and if the GPUs are in // exclusive mode those operations will fail. int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus; HIPCHECK(hipSetDevice(gpuid)); TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop])); return testSuccess; } testResult_t threadInit(struct threadArgs* args) { char hostname[1024]; getHostName(hostname, 1024); int nranks = args->nProcs*args->nThreads*args->nGpus; //set main thread again is_main_thread = (args->proc == 0 && args->thread == 0) ? 
1 : 0; NCCLCHECK(ncclGroupStart()); for (int i=0; i<args->nGpus; i++) { int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i; int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i; HIPCHECK(hipSetDevice(gpuid)); NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank)); } NCCLCHECK(ncclGroupEnd()); TESTCHECK(threadRunTests(args)); for (int i=0; i<args->nGpus; i++) { #if NCCL_MAJOR >= 2 NCCLCHECK(ncclCommDestroy(args->comms[i])); #else ncclCommDestroy(args->comms[i]); #endif } return testSuccess; } void* threadLauncher(void* thread_) { struct testThread* thread = (struct testThread*)thread_; thread->ret = thread->func(&thread->args); return NULL; } testResult_t threadLaunch(struct testThread* thread) { pthread_create(&thread->thread, NULL, threadLauncher, thread); return testSuccess; } testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) { if (memorytype == ncclFine) { HIPCHECK(hipExtMallocWithFlags(sendbuff, nbytes, hipDeviceMallocFinegrained)); HIPCHECK(hipExtMallocWithFlags(recvbuff, nbytes, hipDeviceMallocFinegrained)); HIPCHECK(hipExtMallocWithFlags(expected, recvBytes, hipDeviceMallocFinegrained)); } else if (memorytype == ncclHost) { HIPCHECK(hipHostMalloc(sendbuff, nbytes)); HIPCHECK(hipHostMalloc(recvbuff, nbytes)); HIPCHECK(hipHostMalloc(expected, recvBytes)); } else { HIPCHECK(hipMalloc(sendbuff, nbytes)); HIPCHECK(hipMalloc(recvbuff, nbytes)); HIPCHECK(hipMalloc(expected, recvBytes)); } return testSuccess; } testResult_t run(); // Main function int main(int argc, char* argv[]) { // Make sure everyline is flushed so that we see the progress of the test setlinebuf(stdout); // Parse args int longindex; static struct option longopts[] = { {"nthreads", required_argument, 0, 't'}, {"ngpus", required_argument, 0, 'g'}, {"minbytes", required_argument, 0, 'b'}, {"maxbytes", required_argument, 0, 'e'}, 
{"stepbytes", required_argument, 0, 'i'}, {"stepfactor", required_argument, 0, 'f'}, {"iters", required_argument, 0, 'n'}, {"agg_iters", required_argument, 0, 'm'}, {"warmup_iters", required_argument, 0, 'w'}, {"parallel_init", required_argument, 0, 'p'}, {"check", required_argument, 0, 'c'}, {"op", required_argument, 0, 'o'}, {"datatype", required_argument, 0, 'd'}, {"root", required_argument, 0, 'r'}, {"blocking", required_argument, 0, 'z'}, {"memory_type", required_argument, 0, 'y'}, {"help", no_argument, 0, 'h'} }; while(1) { int c; c = getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:y:h", longopts, &longindex); if (c == -1) break; switch(c) { case 't': nThreads = strtol(optarg, NULL, 0); break; case 'g': nGpus = strtol(optarg, NULL, 0); break; case 'b': minBytes = (size_t)parsesize(optarg); break; case 'e': maxBytes = (size_t)parsesize(optarg); break; case 'i': stepBytes = strtol(optarg, NULL, 0); break; case 'f': stepFactor = strtol(optarg, NULL, 0); break; case 'n': iters = (int)strtol(optarg, NULL, 0); break; case 'm': #if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2 agg_iters = (int)strtol(optarg, NULL, 0); #else printf("Option -m not supported before NCCL 2.2. 
Ignoring\n"); #endif break; case 'w': warmup_iters = (int)strtol(optarg, NULL, 0); break; case 'c': datacheck = (int)strtol(optarg, NULL, 0); break; case 'p': parallel_init = (int)strtol(optarg, NULL, 0); break; case 'o': ncclop = ncclstringtoop(optarg); break; case 'd': nccltype = ncclstringtotype(optarg); break; case 'r': ncclroot = strtol(optarg, NULL, 0); break; case 'z': blocking_coll = strtol(optarg, NULL, 0); break; case 'y': memorytype = ncclstringtomtype(optarg); break; case 'h': printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" "[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-y,--memory_type <coarse/fine/host>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; default: printf("invalid option \n"); printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" "[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-y,--memory_type <coarse/fine/host>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; } } #ifdef MPI_SUPPORT MPI_Init(&argc, &argv); #endif return run(); } 
testResult_t run() { int nProcs = 1, proc = 0; int localRank = 0; char hostname[1024]; getHostName(hostname, 1024); #ifdef MPI_SUPPORT MPI_Comm_size(MPI_COMM_WORLD, &nProcs); MPI_Comm_rank(MPI_COMM_WORLD, &proc); uint64_t hostHashs[nProcs]; hostHashs[proc] = getHostHash(hostname); MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD); for (int p=0; p<nProcs; p++) { if (p == proc) break; if (hostHashs[p] == hostHashs[proc]) localRank++; } #endif is_main_thread = (proc == 0) ? 1 : 0; PRINT("# nThread: %d nGpus: %d minBytes: %ld maxBytes: %ld step: %ld(%s) warmupIters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes, (stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck); if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n"); if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n"); PRINT("#\n"); PRINT("# Using devices\n"); #define MAX_LINE 2048 char line[MAX_LINE]; int len = 0; for (int i=0; i<nThreads*nGpus; i++) { int hipDev = localRank*nThreads*nGpus+i; int rank = proc*nThreads*nGpus+i; hipDeviceProp_t prop; HIPCHECK(hipGetDeviceProperties(&prop, hipDev)); len += snprintf(line+len, MAX_LINE-len, "# Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n", rank, getpid(), hostname, hipDev, prop.pciBusID, prop.name); } #if MPI_SUPPORT char *lines = (proc == 0) ? 
(char *)malloc(nProcs*MAX_LINE) : NULL; // Gather all output in rank order to root (0) MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD); if (proc == 0) { for (int p = 0; p < nProcs; p++) PRINT("%s", lines+MAX_LINE*p); free(lines); } #else PRINT("%s", line); #endif ncclUniqueId ncclId; if (proc == 0) { NCCLCHECK(ncclGetUniqueId(&ncclId)); } #ifdef MPI_SUPPORT MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD); #endif hipStream_t streams[nGpus*nThreads]; void* sendbuffs[nGpus*nThreads]; void* recvbuffs[nGpus*nThreads]; void* expected[nGpus*nThreads]; size_t sendBytes, recvBytes; ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads); for (int i=0; i<nGpus*nThreads; i++) { HIPCHECK(hipSetDevice(localRank*nThreads*nGpus+i)); AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus); HIPCHECK(hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking)); } //if parallel init is not selected, use main thread to initialize NCCL ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus); if (!parallel_init) { if (nProcs == 1) { int gpuArray[nGpus*nThreads]; for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i; NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray)); } else { NCCLCHECK(ncclGroupStart()); for (int i=0; i<nGpus*nThreads; i++) { HIPCHECK(hipSetDevice(localRank*nThreads*nGpus+i)); NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i)); } NCCLCHECK(ncclGroupEnd()); } } int errors[nThreads]; double bw[nThreads]; double* delta; HIPCHECK(hipHostMalloc(&delta, sizeof(double)*nThreads, hipHostMallocPortable | hipHostMallocMapped)); int bw_count[nThreads]; for (int t=0; t<nThreads; t++) { bw[t] = 0.0; errors[t] = bw_count[t] = 0; } PRINT("#\n"); print_header(); int* sync = (int*)calloc(2, sizeof(int)); int* barrier = (int*)calloc(2, sizeof(int)); struct testThread 
threads[nThreads]; memset(threads, 0, sizeof(struct testThread)*nThreads); for (int t=nThreads-1; t>=0; t--) { threads[t].args.minbytes=minBytes; threads[t].args.maxbytes=maxBytes; threads[t].args.stepbytes=stepBytes; threads[t].args.stepfactor=stepFactor; threads[t].args.localRank = localRank; threads[t].args.nProcs=nProcs; threads[t].args.proc=proc; threads[t].args.nThreads=nThreads; threads[t].args.thread=t; threads[t].args.nGpus=nGpus; threads[t].args.sendbuffs = sendbuffs+t*nGpus; threads[t].args.recvbuffs = recvbuffs+t*nGpus; threads[t].args.expected = expected+t*nGpus; threads[t].args.ncclId = ncclId; threads[t].args.comms=comms+t*nGpus; threads[t].args.streams=streams+t*nGpus; threads[t].args.barrier = (volatile int*)barrier; threads[t].args.barrier_idx = 0; threads[t].args.sync = (volatile int*)sync; threads[t].args.sync_idx = 0; threads[t].args.deltaThreads = delta; threads[t].args.deltaHost = (delta + t); threads[t].args.delta = delta; threads[t].args.errors=errors+t; threads[t].args.bw=bw+t; threads[t].args.bw_count=bw_count+t; threads[t].func = parallel_init ? 
threadInit : threadRunTests; if (t) TESTCHECK(threadLaunch(threads+t)); else TESTCHECK(threads[t].func(&threads[t].args)); } // Wait for other threads and accumulate stats and errors for (int t=nThreads-1; t>=0; t--) { if (t) pthread_join(threads[t].thread, NULL); TESTCHECK(threads[t].ret); if (t) { errors[0] += errors[t]; bw[0] += bw[t]; bw_count[0] += bw_count[t]; } } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); #endif if (!parallel_init) { for(int i=0; i<nGpus*nThreads; ++i) #if NCCL_MAJOR >= 2 NCCLCHECK(ncclCommDestroy(comms[i])); #else ncclCommDestroy(comms[i]); #endif free(comms); } // Free off HIP allocated memory for (int i=0; i<nGpus*nThreads; i++) { if (memorytype == ncclHost) { HIPCHECK(hipHostFree(sendbuffs[i])); HIPCHECK(hipHostFree(recvbuffs[i])); HIPCHECK(hipHostFree(expected[i])); } else { HIPCHECK(hipFree(sendbuffs[i])); HIPCHECK(hipFree(recvbuffs[i])); HIPCHECK(hipFree(expected[i])); } } HIPCHECK(hipHostFree(delta)); char* str = getenv("NCCL_TESTS_MIN_BW"); double check_avg_bw = str ? atof(str) : -1; bw[0] /= bw_count[0]; if (datacheck) PRINT("# Errors with asterisks indicate errors that have exceeded the maximum threshold.\n"); PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK"); PRINT("# Avg bus bandwidth : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK")); PRINT("#\n"); #ifdef MPI_SUPPORT MPI_Finalize(); #endif // 'hip-memcheck --leak-check full' requires this hipDeviceReset(); if (errors[0] || bw[0] < check_avg_bw*(0.9)) exit(EXIT_FAILURE); else exit(EXIT_SUCCESS); }
8c5d9431c7549f1c60f67890a94e55c1c7b93a95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/filler.hpp" #include "caffe/layers/channel_normalize_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { // divid a matrix with vector template <typename Dtype> __global__ void DivBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] / v[c]; } else { B[index] = A[index] / v[r]; } } } template <typename Dtype> __global__ void MulBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] * v[c]; } else { B[index] = A[index] * v[r]; } } } template <typename Dtype> void ChannelNormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow caffe_gpu_set<Dtype>(norm_.count(), Dtype(eps_), norm_data); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / num; int spatial_dim = bottom[0]->height() * bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; ++n) { 
caffe_gpu_powx<Dtype>(dim, bottom_data, Dtype(2), buffer_data); if (across_spatial_) { Dtype normsqr; caffe_gpu_asum<Dtype>(dim, buffer_data, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, Dtype(0.5)); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_data, top_data); } else { // compute norm caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(1), norm_data); caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(0.5), norm_data); // scale the layer // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, top_data); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the output if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, top_data, scale, channels, spatial_dim, CblasTrans, top_data); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_data += dim; } } template <typename Dtype> void ChannelNormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.cpu_data(); } else { norm_data = norm_.gpu_data(); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* buffer_channel = buffer_channel_.mutable_gpu_data(); Dtype* buffer_spatial = 
buffer_spatial_.mutable_gpu_data(); const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.gpu_data(); int count = top[0]->count(); int num = top[0]->num(); int dim = count / num; int spatial_dim = top[0]->height() * top[0]->width(); int channels = top[0]->channels(); // Propagate to param if (this->param_propagate_down_[0]) { if (channel_shared_) { Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff(); Dtype a; caffe_gpu_dot<Dtype>(count, top_data, top_diff, &a); scale_diff[0] += a / scale[0]; } else { Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); for (int n = 0; n < num; ++n) { // compute a caffe_gpu_mul<Dtype>(dim, top_data+n*dim, top_diff+n*dim, buffer_data); caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_spatial_multiplier, Dtype(0), buffer_channel); // store a / scale[i] in buffer_data temporary caffe_gpu_div<Dtype>(channels, buffer_channel, scale, buffer_channel); caffe_gpu_add<Dtype>(channels, buffer_channel, scale_diff, scale_diff); } } } // Propagate to bottom if (propagate_down[0]) { for (int n = 0; n < num; ++n) { if (across_spatial_) { Dtype a; caffe_gpu_dot<Dtype>(dim, bottom_data, top_diff, &a); caffe_gpu_scale<Dtype>(dim, a / norm_data[n] / norm_data[n], bottom_data, bottom_diff); caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_diff, bottom_diff); } else { // dot product between bottom_data and top_diff caffe_gpu_mul<Dtype>(dim, bottom_data, top_diff, buffer_data); caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(0), buffer_spatial); // scale botom_diff // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_data, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); 
CUDA_POST_KERNEL_CHECK; // divide by square of norm caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(2), buffer_spatial); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_diff, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); // divide by norm // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_diff, norm_data, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the diff if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], bottom_diff); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_diff, scale, channels, spatial_dim, CblasTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_diff += dim; bottom_diff += dim; } } } INSTANTIATE_LAYER_GPU_FUNCS(ChannelNormalizeLayer); } // namespace caffe
8c5d9431c7549f1c60f67890a94e55c1c7b93a95.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/filler.hpp" #include "caffe/layers/channel_normalize_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { // divid a matrix with vector template <typename Dtype> __global__ void DivBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] / v[c]; } else { B[index] = A[index] / v[r]; } } } template <typename Dtype> __global__ void MulBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] * v[c]; } else { B[index] = A[index] * v[r]; } } } template <typename Dtype> void ChannelNormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow caffe_gpu_set<Dtype>(norm_.count(), Dtype(eps_), norm_data); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / num; int spatial_dim = bottom[0]->height() * bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; ++n) { caffe_gpu_powx<Dtype>(dim, bottom_data, Dtype(2), buffer_data); if (across_spatial_) { 
Dtype normsqr; caffe_gpu_asum<Dtype>(dim, buffer_data, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, Dtype(0.5)); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_data, top_data); } else { // compute norm caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(1), norm_data); caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(0.5), norm_data); // scale the layer // NOLINT_NEXT_LINE(whitespace/operators) DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, top_data); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the output if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, top_data, scale, channels, spatial_dim, CblasTrans, top_data); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_data += dim; } } template <typename Dtype> void ChannelNormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.cpu_data(); } else { norm_data = norm_.gpu_data(); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* buffer_channel = buffer_channel_.mutable_gpu_data(); Dtype* buffer_spatial = buffer_spatial_.mutable_gpu_data(); const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); const Dtype* sum_spatial_multiplier = 
sum_spatial_multiplier_.gpu_data(); int count = top[0]->count(); int num = top[0]->num(); int dim = count / num; int spatial_dim = top[0]->height() * top[0]->width(); int channels = top[0]->channels(); // Propagate to param if (this->param_propagate_down_[0]) { if (channel_shared_) { Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff(); Dtype a; caffe_gpu_dot<Dtype>(count, top_data, top_diff, &a); scale_diff[0] += a / scale[0]; } else { Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); for (int n = 0; n < num; ++n) { // compute a caffe_gpu_mul<Dtype>(dim, top_data+n*dim, top_diff+n*dim, buffer_data); caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_spatial_multiplier, Dtype(0), buffer_channel); // store a / scale[i] in buffer_data temporary caffe_gpu_div<Dtype>(channels, buffer_channel, scale, buffer_channel); caffe_gpu_add<Dtype>(channels, buffer_channel, scale_diff, scale_diff); } } } // Propagate to bottom if (propagate_down[0]) { for (int n = 0; n < num; ++n) { if (across_spatial_) { Dtype a; caffe_gpu_dot<Dtype>(dim, bottom_data, top_diff, &a); caffe_gpu_scale<Dtype>(dim, a / norm_data[n] / norm_data[n], bottom_data, bottom_diff); caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_diff, bottom_diff); } else { // dot product between bottom_data and top_diff caffe_gpu_mul<Dtype>(dim, bottom_data, top_diff, buffer_data); caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(0), buffer_spatial); // scale botom_diff // NOLINT_NEXT_LINE(whitespace/operators) MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_data, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; // divide by square of norm caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(2), buffer_spatial); // NOLINT_NEXT_LINE(whitespace/operators) DivBsx<Dtype> 
<<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_diff, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); // divide by norm // NOLINT_NEXT_LINE(whitespace/operators) DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_diff, norm_data, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the diff if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], bottom_diff); } else { // NOLINT_NEXT_LINE(whitespace/operators) MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_diff, scale, channels, spatial_dim, CblasTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_diff += dim; bottom_diff += dim; } } } INSTANTIATE_LAYER_GPU_FUNCS(ChannelNormalizeLayer); } // namespace caffe
e54abd9d227c2c59e4b30fdeb4bd67e04be7d114.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmergebicgstab2.cu, normal z -> d, Mon Jun 25 18:24:25 2018 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define PRECISION_d // These routines merge multiple kernels from dmergebicgstab into one // This is the code used for the ASHES2014 paper // "Accelerating Krylov Subspace Solvers on Graphics Processing Units". // notice that only CSR format is supported so far. // accelerated reduction for one vector __global__ void magma_dreduce_kernel_spmv1( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_dbicgmerge_spmv1_kernel( int n, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * p, double * r, double * v, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ double dot = MAGMA_D_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * p[ dcolind[j] ]; v[ i ] = dot; } __syncthreads(); temp[ Idx ] = ( i < n ) ? 
v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_dbicgstab_alphakernel( double * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp = skp[0]; skp[0] = skp[4]/tmp; } } /** Purpose ------- Merges the first SpmV using CSR with the dot product and the computation of alpha Arguments --------- @param[in] A magma_d_matrix system matrix @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] dp magmaDouble_ptr input vector p @param[in] dr magmaDouble_ptr input vector r @param[in] dv magmaDouble_ptr output vector v @param[in,out] skp magmaDouble_ptr array for parameters ( skp[0]=alpha ) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbicgmerge_spmv1( magma_d_matrix A, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr dp, magmaDouble_ptr dr, magmaDouble_ptr dv, magmaDouble_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) hipLaunchKernelGGL(( magma_dbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_dbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // accelerated block reduction for multiple vectors __global__ void magma_dreduce_kernel_spmv2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } __global__ void magma_dbicgmerge_spmv2_kernel( int n, double * dval, magma_index_t 
* drowptr, magma_index_t * dcolind, double * s, double * t, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ double dot = MAGMA_D_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * s[ dcolind[j] ]; t[ i ] = dot; } __syncthreads(); // 2 vectors if (i<n){ double tmp2 = t[i]; temp[Idx] = s[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; j++) temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; 
j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void magma_dbicgstab_omegakernel( double * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ skp[2] = skp[6]/skp[7]; skp[3] = skp[4]; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] A magma_d_matrix input matrix @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] ds magmaDouble_ptr input vector s @param[in] dt magmaDouble_ptr output vector t @param[in,out] skp magmaDouble_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbicgmerge_spmv2( magma_d_matrix A, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr ds, magmaDouble_ptr dt, magmaDouble_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) hipLaunchKernelGGL(( magma_dbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, A.dval, A.drow, A.dcol, ds, dt, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+6, 1, queue ); magma_dcopyvector( 1, aux1+n, 1, skp+7, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_dbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge_xrbeta_kernel( int n, double * rr, double * r, double * p, double * s, double * t, double * x, double * skp, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; double alpha=skp[0]; double omega=skp[2]; if( i<n ){ double sl; sl = s[i]; x[i] = x[i] + alpha * p[i] + omega * sl; r[i] = sl - omega * t[i]; } __syncthreads(); // 2 vectors if (i<n){ double tmp2 = r[i]; temp[Idx] = rr[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; 
j++) temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void 
magma_dbicgstab_betakernel( double * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp1 = skp[4]/skp[3]; double tmp2 = skp[0] / skp[2]; skp[1] = tmp1*tmp2; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] rr magmaDouble_ptr input vector rr @param[in] r magmaDouble_ptr input/output vector r @param[in] p magmaDouble_ptr input vector p @param[in] s magmaDouble_ptr input vector s @param[in] t magmaDouble_ptr input vector t @param[out] x magmaDouble_ptr output vector x @param[in] skp magmaDouble_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbicgmerge_xrbeta( magma_int_t n, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr rr, magmaDouble_ptr r, magmaDouble_ptr p, magmaDouble_ptr s, magmaDouble_ptr t, magmaDouble_ptr x, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_dbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, rr, r, p, s, t, x, skp, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue ); magma_dcopyvector( 1, aux1+n, 1, skp+5, 1, queue ); dim3 Bs2( 2 ); dim3 
Gs2( 1 ); hipLaunchKernelGGL(( magma_dbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
e54abd9d227c2c59e4b30fdeb4bd67e04be7d114.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmergebicgstab2.cu, normal z -> d, Mon Jun 25 18:24:25 2018 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define PRECISION_d // These routines merge multiple kernels from dmergebicgstab into one // This is the code used for the ASHES2014 paper // "Accelerating Krylov Subspace Solvers on Graphics Processing Units". // notice that only CSR format is supported so far. // accelerated reduction for one vector __global__ void magma_dreduce_kernel_spmv1( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 
]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_dbicgmerge_spmv1_kernel( int n, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * p, double * r, double * v, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ double dot = MAGMA_D_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * p[ dcolind[j] ]; v[ i ] = dot; } __syncthreads(); temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_dbicgstab_alphakernel( double * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp = skp[0]; skp[0] = skp[4]/tmp; } } /** Purpose 
------- Merges the first SpmV using CSR with the dot product and the computation of alpha Arguments --------- @param[in] A magma_d_matrix system matrix @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] dp magmaDouble_ptr input vector p @param[in] dr magmaDouble_ptr input vector r @param[in] dv magmaDouble_ptr output vector v @param[in,out] skp magmaDouble_ptr array for parameters ( skp[0]=alpha ) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbicgmerge_spmv1( magma_d_matrix A, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr dp, magmaDouble_ptr dr, magmaDouble_ptr dv, magmaDouble_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) magma_dbicgmerge_spmv1_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_dbicgstab_alphakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // accelerated block reduction for multiple vectors __global__ void magma_dreduce_kernel_spmv2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ 
double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] 
+= temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } __global__ void magma_dbicgmerge_spmv2_kernel( int n, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * s, double * t, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ double dot = MAGMA_D_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * s[ dcolind[j] ]; t[ i ] = dot; } __syncthreads(); // 2 vectors if (i<n){ double tmp2 = t[i]; temp[Idx] = s[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; j++) temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ 
Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void magma_dbicgstab_omegakernel( double * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ skp[2] = skp[6]/skp[7]; skp[3] = skp[4]; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] A magma_d_matrix input matrix @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] ds magmaDouble_ptr input vector s @param[in] dt magmaDouble_ptr output vector t @param[in,out] skp magmaDouble_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbicgmerge_spmv2( magma_d_matrix A, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr ds, magmaDouble_ptr dt, magmaDouble_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) magma_dbicgmerge_spmv2_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, A.dval, A.drow, A.dcol, ds, dt, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+6, 1, queue ); magma_dcopyvector( 1, aux1+n, 1, skp+7, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_dbicgstab_omegakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge_xrbeta_kernel( int n, double * rr, double * r, double * p, double * s, double * t, double * x, double * skp, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; double alpha=skp[0]; double omega=skp[2]; if( i<n ){ double sl; sl = s[i]; x[i] = x[i] + alpha * p[i] + omega * sl; r[i] = sl - omega * t[i]; } __syncthreads(); // 2 vectors if (i<n){ double tmp2 = r[i]; temp[Idx] = rr[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; j++) temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 
128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void magma_dbicgstab_betakernel( double * skp ) { int i = blockIdx.x * blockDim.x + 
threadIdx.x; if( i==0 ){ double tmp1 = skp[4]/skp[3]; double tmp2 = skp[0] / skp[2]; skp[1] = tmp1*tmp2; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] rr magmaDouble_ptr input vector rr @param[in] r magmaDouble_ptr input/output vector r @param[in] p magmaDouble_ptr input vector p @param[in] s magmaDouble_ptr input vector s @param[in] t magmaDouble_ptr input vector t @param[out] x magmaDouble_ptr output vector x @param[in] skp magmaDouble_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbicgmerge_xrbeta( magma_int_t n, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr rr, magmaDouble_ptr r, magmaDouble_ptr p, magmaDouble_ptr s, magmaDouble_ptr t, magmaDouble_ptr x, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; magma_dbicgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, rr, r, p, s, t, x, skp, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue ); magma_dcopyvector( 1, aux1+n, 1, skp+5, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_dbicgstab_betakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); return MAGMA_SUCCESS; } /* 
-------------------------------------------------------------------------- */
d7aeaedd150fd9e76af568adc1e7e3d422fea5ac.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <iostream> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include "THH/THH.h" #include <ATen/hip/HIPContext.h> #include <torch/extension.h> #include <math.h> #include "softmax_dropout.h" // symbol to be automatically resolved by PyTorch libs extern THCState *state; namespace multihead_attn { namespace encdec { namespace cublas_gemmex { std::vector<torch::Tensor> fwd_cuda( bool is_training, int heads, torch::Tensor const& inputs_q, torch::Tensor const& inputs_kv, torch::Tensor const& input_weights_q, torch::Tensor const& input_weights_kv, torch::Tensor const& output_weights, torch::Tensor const& pad_mask, float dropout_prob ) { const int embed_dim = inputs_q.size(2); const int sequences = inputs_q.size(1); const int q_seq_len = inputs_q.size(0); const int k_seq_len = inputs_kv.size(0); const int batches_q = sequences * q_seq_len; const int batches_kv = sequences * k_seq_len; const int head_dim = embed_dim / heads; const int output_lin_q_dim = embed_dim; const int output_lin_kv_dim = 2 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim_q = attn_batches * head_dim; const int lead_dim_kv = attn_batches * 2 *head_dim; const int batch_stride_q = head_dim; const int batch_stride_kv = 2 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // printf("Input kernel sizes: %d %d %d \n", // inputs_kv.size(0), inputs_kv.size(1), inputs_kv.size(2)); // There is no reason to use more than one stream as every kernel is // sequentially dependent hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, 
stream); // 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code) auto act_options = inputs_q.options().requires_grad(false); auto mask_options = act_options.dtype(torch::kUInt8); torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options); torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options); torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options); torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options); torch::Tensor outputs = torch::empty_like(inputs_q, act_options); // Input Linear Results Pointers to Q, K, and V of interviewed activations void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr()); void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr()); void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr()); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr()); char a_layout_t{'t'}; char a_layout_n{'n'}; char b_layout_n{'n'}; THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Input Linear Q Fwd THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_T, // A transpose HIPBLAS_OP_N, // B wo/ transpose output_lin_q_dim, // embed_dim batches_q, // bsz x len_q embed_dim, // embed_dim static_cast<const void*>(&alpha), static_cast<const 
void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed HIP_R_16F, embed_dim, // lda so A has size [lda x m] -> [embed_dim x output_lin_q_dim] static_cast<const void*>(inputs_q.data_ptr()), // input Q HIP_R_16F, embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q] static_cast<const void*>(&beta), // beta q_lin_results_ptr, // C -> emb * B HIP_R_16F, output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q] HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Fwd THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, output_lin_kv_dim, batches_kv, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(inputs_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta), k_lin_results_ptr, HIP_R_16F, output_lin_kv_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size) gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, // m q_seq_len, // n head_dim, // k scale, static_cast<const half*>(k_lin_results_ptr), lead_dim_kv, // lda batch_stride_kv, //strideA static_cast<const half*>(q_lin_results_ptr), lead_dim_q, // ldb batch_stride_q, //strideB beta, static_cast<half*>(attn_scores_ptr), // [attn_batches * len_q * len_k] k_seq_len, // ldc k_seq_len*q_seq_len, // stride c attn_batches); // p // need to call padding from torch interface here. 
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask, -std::numeric_limits<float>::infinity()); attn_scores.view({sequences*heads, q_seq_len, k_seq_len}); bool softmax_success = false; if (is_training && dropout_prob > 0.0) { softmax_success = dispatch_softmax_dropout<half, half, float>( reinterpret_cast<half*>(dropout_results_ptr), reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<uint8_t*>(dropout_mask.data_ptr<uint8_t>()), reinterpret_cast<const half*>(attn_scores_ptr), dropout_elems, k_seq_len, k_seq_len, attn_batches*q_seq_len, (1.0f - dropout_prob), stream); } else { softmax_success = dispatch_softmax<half, half, float>( reinterpret_cast<half*>(dropout_results_ptr), reinterpret_cast<const half*>(attn_scores_ptr), dropout_elems, k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); softmax_results.copy_(dropout_results); } assert(softmax_success); // Matmul2 // matrix kv has size len_k * batch_size * (2 * heads * head_dim) // dropout results [bsz*heads, len_q, len_k] // matmul2_results is [len_q x attn_batches x head_dim] gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, // m q_seq_len, // n k_seq_len, // k alpha, static_cast<const half*>(v_lin_results_ptr), // A_i [head_dimxk_seq_len] lead_dim_kv, // attn_batches * 2 *head_dim batch_stride_kv, // stride = 2 * head_dim static_cast<const half*>(dropout_results.data_ptr()), // B_i [k_seq_len x q_seq_len] k_seq_len, // lead_dim k_seq_len*q_seq_len, // stride beta, static_cast<half*>(matmul2_results.data_ptr()), head_dim*attn_batches, // ldc head_dim, // stride c attn_batches); //p // Output Linear THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, embed_dim, batches_q, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(output_weights.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(matmul2_results.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(outputs.data_ptr()), HIP_R_16F, 
embed_dim, HIP_R_32F, //CUBLAS_GEMM_ALGO1_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_lin_q_results, input_lin_kv_results, softmax_results, dropout_results, dropout_mask, matmul2_results, outputs }; } std::vector<torch::Tensor> bwd_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& matmul2_results, torch::Tensor const& dropout_results, torch::Tensor const& softmax_results, torch::Tensor const& input_lin_q_results, torch::Tensor const& input_lin_kv_results, torch::Tensor const& inputs_q, torch::Tensor const& inputs_kv, torch::Tensor const& input_weights_q, torch::Tensor const& input_weights_kv, torch::Tensor const& output_weights, torch::Tensor const& dropout_mask, float dropout_prob ) { const int embed_dim = inputs_q.size(2); const int sequences = inputs_q.size(1); const int q_seq_len = inputs_q.size(0); const int k_seq_len = inputs_kv.size(0); const int batches_q = sequences * q_seq_len; const int batches_kv = sequences * k_seq_len; const int head_dim = embed_dim / heads; const int output_lin_q_dim = embed_dim; const int output_lin_kv_dim = 2 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim_q = attn_batches * head_dim; const int lead_dim_kv = attn_batches * 2 *head_dim; const int batch_stride_q = head_dim; const int batch_stride_kv = 2 * head_dim; // const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // TODO: Streams can be used in Backprop but I haven't added more than one // in my first attempt to create the code hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); // Output Tensor Allocations torch::Tensor input_q_grads = torch::empty_like(inputs_q); torch::Tensor input_kv_grads = 
torch::empty_like(inputs_kv); torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q); torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv); torch::Tensor output_weight_grads = torch::empty_like(output_weights); // Intermediate Tensor Allocations at::Tensor output_lin_grads = torch::empty_like(matmul2_results); at::Tensor matmul2_grads = torch::empty_like(softmax_results); at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results); at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results); auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr()); auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()); auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim; auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr()); auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()); auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim; char a_layout_n{'n'}; char a_layout_t{'t'}; char b_layout_n{'n'}; char b_layout_t{'t'}; THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Output Linear Dgrad // C = alpha * op(A) op(B) + BetaC // op(A): mxk, op(B): kxn C: mxn THCublasCheck(hipblasGemmEx(handle, // HIPBLAS_OP_N, // no transpose HIPBLAS_OP_N, // no transpose embed_dim, // m batches_q, // n = bsz * len_q embed_dim, // k static_cast<const void*>(&alpha), // alpha = 1.0 static_cast<const void*>(output_weights.data_ptr()), // A mxk HIP_R_16F, // data type embed_dim, // leading dimension of A (embed dim) (the rows) static_cast<const void*>(output_grads.data_ptr()), // B kxn HIP_R_16F, // data type embed_dim, // leading dimension of B (embed dim) static_cast<const void*>(&beta), // beta static_cast<void*>(output_lin_grads.data_ptr()), // C mxn HIP_R_16F, // data type embed_dim, // ldc HIP_R_32F, // compute type 
CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Output Linear Wgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, embed_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(matmul2_results.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(output_grads.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_weight_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul2 Dgrad1 gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim_kv, batch_stride_kv, // 2 * head_dim static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, beta, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Matmul2 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, alpha, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, static_cast<const half*>(dropout_results.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, v_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); // bool softmax_success = false; dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>( static_cast<half*>(matmul2_grads.data_ptr()), static_cast<half* const>(matmul2_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); // Matmul1 Dgrad1 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, scale, k_lin_results_ptr, lead_dim_kv, batch_stride_kv, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, q_lin_grads_ptr, lead_dim_q, batch_stride_q, attn_batches); // Matmul1 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, 
b_layout_t, head_dim, k_seq_len, q_seq_len, scale, q_lin_results_ptr, lead_dim_q, batch_stride_q, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, k_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); // Input Linear Q Dgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches_q, output_lin_q_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_q.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), HIP_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_q_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear Q Wgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, output_lin_q_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_q.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), HIP_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_q_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Dgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches_kv, output_lin_kv_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), HIP_R_16F, output_lin_kv_dim, static_cast<const void*>(&beta), static_cast<void*>(input_kv_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Wgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, output_lin_kv_dim, batches_kv, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), HIP_R_16F, output_lin_kv_dim, 
static_cast<const void*>(&beta), static_cast<void*>(input_weight_kv_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_q_grads, input_kv_grads, input_weight_q_grads, input_weight_kv_grads, output_weight_grads }; } std::vector<torch::Tensor> bwd_recompute_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& inputs_q, torch::Tensor const& inputs_kv, torch::Tensor const& input_weights_q, torch::Tensor const& input_weights_kv, torch::Tensor const& output_weights, torch::Tensor const& dropout_mask, torch::Tensor const& pad_mask, float dropout_prob ) { const int embed_dim = inputs_q.size(2); const int sequences = inputs_q.size(1); const int q_seq_len = inputs_q.size(0); const int k_seq_len = inputs_kv.size(0); const int batches_q = sequences * q_seq_len; const int batches_kv = sequences * k_seq_len; const int head_dim = embed_dim / heads; const int output_lin_q_dim = embed_dim; const int output_lin_kv_dim = 2 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim_q = attn_batches * head_dim; const int lead_dim_kv = attn_batches * 2 *head_dim; const int batch_stride_q = head_dim; const int batch_stride_kv = 2 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // TODO: Streams can be used in Backprop but I haven't added more than one // in my first attempt to create the code hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); // Tensor allocations for recomputation auto act_options = inputs_q.options().requires_grad(false); torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options); torch::Tensor input_lin_kv_results = 
torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options); torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr()); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr()); // Output Tensor Allocations torch::Tensor input_q_grads = torch::empty_like(inputs_q); torch::Tensor input_kv_grads = torch::empty_like(inputs_kv); torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q); torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv); torch::Tensor output_weight_grads = torch::empty_like(output_weights); // Intermediate Tensor Allocations at::Tensor output_lin_grads = torch::empty_like(matmul2_results); at::Tensor matmul2_grads = torch::empty_like(softmax_results); at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results); at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results); auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr()); auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()); auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim; auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr()); auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()); auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim; char a_layout_n{'n'}; char a_layout_t{'t'}; char b_layout_n{'n'}; char 
b_layout_t{'t'}; THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Input Linear Q Fwd THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_T, // A transpose HIPBLAS_OP_N, // B wo/ transpose output_lin_q_dim, // embed_dim batches_q, // bsz x len_q embed_dim, // embed_dim static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed HIP_R_16F, embed_dim, // lda so A has size [lda x m] -> [embed_dim x output_lin_q_dim] static_cast<const void*>(inputs_q.data_ptr()), // input Q HIP_R_16F, embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q] static_cast<const void*>(&beta), // beta q_lin_results_ptr, // C -> emb * B HIP_R_16F, output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q] HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Fwd THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, output_lin_kv_dim, batches_kv, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(inputs_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta), k_lin_results_ptr, HIP_R_16F, output_lin_kv_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size) gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, // m q_seq_len, // n head_dim, // k scale, static_cast<const half*>(k_lin_results_ptr), lead_dim_kv, // lda batch_stride_kv, //strideA static_cast<const half*>(q_lin_results_ptr), lead_dim_q, // ldb batch_stride_q, //strideB beta, static_cast<half*>(attn_scores_ptr), // [attn_batches * len_q * len_k] k_seq_len, // ldc k_seq_len*q_seq_len, // stride c attn_batches); // p // need to call padding from torch interface here. 
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask, -std::numeric_limits<float>::infinity()); attn_scores.view({sequences*heads, q_seq_len, k_seq_len}); bool softmax_success = false; // run softmax dropout again but don't change the dropout mask softmax_success = dispatch_softmax_dropout_presampled<half, half, float>( reinterpret_cast<half*>(dropout_results_ptr), reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const uint8_t*>(dropout_mask.data_ptr<uint8_t>()), reinterpret_cast<const half*>(attn_scores_ptr), dropout_elems, k_seq_len, k_seq_len, attn_batches*q_seq_len, (1.0f - dropout_prob), stream); assert(softmax_success); // Matmul2 // matrix kv has size len_k * batch_size * (2 * heads * head_dim) // dropout results [bsz*heads, len_q, len_k] // matmul2_results is [len_q x attn_batches x head_dim] gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, // m q_seq_len, // n k_seq_len, // k alpha, static_cast<const half*>(v_lin_results_ptr), // A_i [head_dimxk_seq_len] lead_dim_kv, // attn_batches * 2 *head_dim batch_stride_kv, // stride = 2 * head_dim static_cast<const half*>(dropout_results.data_ptr()), // B_i [k_seq_len x q_seq_len] k_seq_len, // lead_dim k_seq_len*q_seq_len, // stride beta, static_cast<half*>(matmul2_results.data_ptr()), head_dim*attn_batches, // ldc head_dim, // stride c attn_batches); //p ////////////////////////////////////////// Recomputation Done ///////////////////////////////////// // Output Linear Dgrad // C = alpha * op(A) op(B) + BetaC // op(A): mxk, op(B): kxn C: mxn THCublasCheck(hipblasGemmEx(handle, // HIPBLAS_OP_N, // no transpose HIPBLAS_OP_N, // no transpose embed_dim, // m batches_q, // n = bsz * len_q embed_dim, // k static_cast<const void*>(&alpha), // alpha = 1.0 static_cast<const void*>(output_weights.data_ptr()), // A mxk HIP_R_16F, // data type embed_dim, // leading dimension of A (embed dim) (the rows) static_cast<const void*>(output_grads.data_ptr()), // B kxn 
HIP_R_16F, // data type embed_dim, // leading dimension of B (embed dim) static_cast<const void*>(&beta), // beta static_cast<void*>(output_lin_grads.data_ptr()), // C mxn HIP_R_16F, // data type embed_dim, // ldc HIP_R_32F, // compute type CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Output Linear Wgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, embed_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(matmul2_results.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(output_grads.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_weight_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul2 Dgrad1 gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim_kv, batch_stride_kv, // 2 * head_dim static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, beta, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Matmul2 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, alpha, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, static_cast<const half*>(dropout_results.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, v_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>( static_cast<half*>(matmul2_grads.data_ptr()), static_cast<half* const>(matmul2_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); // Matmul1 Dgrad1 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, scale, k_lin_results_ptr, lead_dim_kv, batch_stride_kv, 
static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, q_lin_grads_ptr, lead_dim_q, batch_stride_q, attn_batches); // Matmul1 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, scale, q_lin_results_ptr, lead_dim_q, batch_stride_q, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, k_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); // Input Linear Q Dgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches_q, output_lin_q_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_q.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), HIP_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_q_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear Q Wgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, output_lin_q_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_q.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), HIP_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_q_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Dgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches_kv, output_lin_kv_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), HIP_R_16F, output_lin_kv_dim, static_cast<const void*>(&beta), static_cast<void*>(input_kv_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Wgrad THCublasCheck(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, 
output_lin_kv_dim, batches_kv, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_kv.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), HIP_R_16F, output_lin_kv_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_kv_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_q_grads, input_kv_grads, input_weight_q_grads, input_weight_kv_grads, output_weight_grads }; } } // end namespace cublas_gemmex } // end namespace encdec } // end namespace multihead_attn
d7aeaedd150fd9e76af568adc1e7e3d422fea5ac.cu
#include <vector> #include <iostream> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include "THC/THC.h" #include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include <math.h> #include "softmax_dropout.h" // symbol to be automatically resolved by PyTorch libs extern THCState *state; namespace multihead_attn { namespace encdec { namespace cublas_gemmex { std::vector<torch::Tensor> fwd_cuda( bool is_training, int heads, torch::Tensor const& inputs_q, torch::Tensor const& inputs_kv, torch::Tensor const& input_weights_q, torch::Tensor const& input_weights_kv, torch::Tensor const& output_weights, torch::Tensor const& pad_mask, float dropout_prob ) { const int embed_dim = inputs_q.size(2); const int sequences = inputs_q.size(1); const int q_seq_len = inputs_q.size(0); const int k_seq_len = inputs_kv.size(0); const int batches_q = sequences * q_seq_len; const int batches_kv = sequences * k_seq_len; const int head_dim = embed_dim / heads; const int output_lin_q_dim = embed_dim; const int output_lin_kv_dim = 2 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim_q = attn_batches * head_dim; const int lead_dim_kv = attn_batches * 2 *head_dim; const int batch_stride_q = head_dim; const int batch_stride_kv = 2 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // printf("Input kernel sizes: %d %d %d \n", // inputs_kv.size(0), inputs_kv.size(1), inputs_kv.size(2)); // There is no reason to use more than one stream as every kernel is // sequentially dependent cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); // 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen 
library code) auto act_options = inputs_q.options().requires_grad(false); auto mask_options = act_options.dtype(torch::kUInt8); torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options); torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options); torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options); torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options); torch::Tensor outputs = torch::empty_like(inputs_q, act_options); // Input Linear Results Pointers to Q, K, and V of interviewed activations void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr()); void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr()); void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr()); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr()); char a_layout_t{'t'}; char a_layout_n{'n'}; char b_layout_n{'n'}; THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Input Linear Q Fwd THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_T, // A transpose CUBLAS_OP_N, // B wo/ transpose output_lin_q_dim, // embed_dim batches_q, // bsz x len_q embed_dim, // embed_dim static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed CUDA_R_16F, embed_dim, // lda so A 
has size [lda x m] -> [embed_dim x output_lin_q_dim] static_cast<const void*>(inputs_q.data_ptr()), // input Q CUDA_R_16F, embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q] static_cast<const void*>(&beta), // beta q_lin_results_ptr, // C -> emb * B CUDA_R_16F, output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q] CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Fwd THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, output_lin_kv_dim, batches_kv, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(inputs_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta), k_lin_results_ptr, CUDA_R_16F, output_lin_kv_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size) gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, // m q_seq_len, // n head_dim, // k scale, static_cast<const half*>(k_lin_results_ptr), lead_dim_kv, // lda batch_stride_kv, //strideA static_cast<const half*>(q_lin_results_ptr), lead_dim_q, // ldb batch_stride_q, //strideB beta, static_cast<half*>(attn_scores_ptr), // [attn_batches * len_q * len_k] k_seq_len, // ldc k_seq_len*q_seq_len, // stride c attn_batches); // p // need to call padding from torch interface here. 
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask, -std::numeric_limits<float>::infinity()); attn_scores.view({sequences*heads, q_seq_len, k_seq_len}); bool softmax_success = false; if (is_training && dropout_prob > 0.0) { softmax_success = dispatch_softmax_dropout<half, half, float>( reinterpret_cast<half*>(dropout_results_ptr), reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<uint8_t*>(dropout_mask.data_ptr<uint8_t>()), reinterpret_cast<const half*>(attn_scores_ptr), dropout_elems, k_seq_len, k_seq_len, attn_batches*q_seq_len, (1.0f - dropout_prob), stream); } else { softmax_success = dispatch_softmax<half, half, float>( reinterpret_cast<half*>(dropout_results_ptr), reinterpret_cast<const half*>(attn_scores_ptr), dropout_elems, k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); softmax_results.copy_(dropout_results); } assert(softmax_success); // Matmul2 // matrix kv has size len_k * batch_size * (2 * heads * head_dim) // dropout results [bsz*heads, len_q, len_k] // matmul2_results is [len_q x attn_batches x head_dim] gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, // m q_seq_len, // n k_seq_len, // k alpha, static_cast<const half*>(v_lin_results_ptr), // A_i [head_dimxk_seq_len] lead_dim_kv, // attn_batches * 2 *head_dim batch_stride_kv, // stride = 2 * head_dim static_cast<const half*>(dropout_results.data_ptr()), // B_i [k_seq_len x q_seq_len] k_seq_len, // lead_dim k_seq_len*q_seq_len, // stride beta, static_cast<half*>(matmul2_results.data_ptr()), head_dim*attn_batches, // ldc head_dim, // stride c attn_batches); //p // Output Linear THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, embed_dim, batches_q, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(output_weights.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(matmul2_results.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(outputs.data_ptr()), CUDA_R_16F, 
embed_dim, CUDA_R_32F, //CUBLAS_GEMM_ALGO1_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_lin_q_results, input_lin_kv_results, softmax_results, dropout_results, dropout_mask, matmul2_results, outputs }; } std::vector<torch::Tensor> bwd_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& matmul2_results, torch::Tensor const& dropout_results, torch::Tensor const& softmax_results, torch::Tensor const& input_lin_q_results, torch::Tensor const& input_lin_kv_results, torch::Tensor const& inputs_q, torch::Tensor const& inputs_kv, torch::Tensor const& input_weights_q, torch::Tensor const& input_weights_kv, torch::Tensor const& output_weights, torch::Tensor const& dropout_mask, float dropout_prob ) { const int embed_dim = inputs_q.size(2); const int sequences = inputs_q.size(1); const int q_seq_len = inputs_q.size(0); const int k_seq_len = inputs_kv.size(0); const int batches_q = sequences * q_seq_len; const int batches_kv = sequences * k_seq_len; const int head_dim = embed_dim / heads; const int output_lin_q_dim = embed_dim; const int output_lin_kv_dim = 2 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim_q = attn_batches * head_dim; const int lead_dim_kv = attn_batches * 2 *head_dim; const int batch_stride_q = head_dim; const int batch_stride_kv = 2 * head_dim; // const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // TODO: Streams can be used in Backprop but I haven't added more than one // in my first attempt to create the code cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); // Output Tensor Allocations torch::Tensor input_q_grads = torch::empty_like(inputs_q); torch::Tensor input_kv_grads = 
torch::empty_like(inputs_kv); torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q); torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv); torch::Tensor output_weight_grads = torch::empty_like(output_weights); // Intermediate Tensor Allocations at::Tensor output_lin_grads = torch::empty_like(matmul2_results); at::Tensor matmul2_grads = torch::empty_like(softmax_results); at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results); at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results); auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr()); auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()); auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim; auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr()); auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()); auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim; char a_layout_n{'n'}; char a_layout_t{'t'}; char b_layout_n{'n'}; char b_layout_t{'t'}; THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Output Linear Dgrad // C = alpha * op(A) op(B) + BetaC // op(A): mxk, op(B): kxn C: mxn THCublasCheck(cublasGemmEx(handle, // CUBLAS_OP_N, // no transpose CUBLAS_OP_N, // no transpose embed_dim, // m batches_q, // n = bsz * len_q embed_dim, // k static_cast<const void*>(&alpha), // alpha = 1.0 static_cast<const void*>(output_weights.data_ptr()), // A mxk CUDA_R_16F, // data type embed_dim, // leading dimension of A (embed dim) (the rows) static_cast<const void*>(output_grads.data_ptr()), // B kxn CUDA_R_16F, // data type embed_dim, // leading dimension of B (embed dim) static_cast<const void*>(&beta), // beta static_cast<void*>(output_lin_grads.data_ptr()), // C mxn CUDA_R_16F, // data type embed_dim, // ldc CUDA_R_32F, // compute type 
CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Output Linear Wgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, embed_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(matmul2_results.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(output_grads.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_weight_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul2 Dgrad1 gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim_kv, batch_stride_kv, // 2 * head_dim static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, beta, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Matmul2 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, alpha, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, static_cast<const half*>(dropout_results.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, v_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); // bool softmax_success = false; dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>( static_cast<half*>(matmul2_grads.data_ptr()), static_cast<half* const>(matmul2_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); // Matmul1 Dgrad1 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, scale, k_lin_results_ptr, lead_dim_kv, batch_stride_kv, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, q_lin_grads_ptr, lead_dim_q, batch_stride_q, attn_batches); // Matmul1 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, 
b_layout_t, head_dim, k_seq_len, q_seq_len, scale, q_lin_results_ptr, lead_dim_q, batch_stride_q, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, k_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); // Input Linear Q Dgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches_q, output_lin_q_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_q.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), CUDA_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_q_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear Q Wgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, output_lin_q_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_q.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), CUDA_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_q_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Dgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches_kv, output_lin_kv_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), CUDA_R_16F, output_lin_kv_dim, static_cast<const void*>(&beta), static_cast<void*>(input_kv_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Wgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, output_lin_kv_dim, batches_kv, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), CUDA_R_16F, output_lin_kv_dim, 
static_cast<const void*>(&beta), static_cast<void*>(input_weight_kv_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_q_grads, input_kv_grads, input_weight_q_grads, input_weight_kv_grads, output_weight_grads }; } std::vector<torch::Tensor> bwd_recompute_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& inputs_q, torch::Tensor const& inputs_kv, torch::Tensor const& input_weights_q, torch::Tensor const& input_weights_kv, torch::Tensor const& output_weights, torch::Tensor const& dropout_mask, torch::Tensor const& pad_mask, float dropout_prob ) { const int embed_dim = inputs_q.size(2); const int sequences = inputs_q.size(1); const int q_seq_len = inputs_q.size(0); const int k_seq_len = inputs_kv.size(0); const int batches_q = sequences * q_seq_len; const int batches_kv = sequences * k_seq_len; const int head_dim = embed_dim / heads; const int output_lin_q_dim = embed_dim; const int output_lin_kv_dim = 2 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim_q = attn_batches * head_dim; const int lead_dim_kv = attn_batches * 2 *head_dim; const int batch_stride_q = head_dim; const int batch_stride_kv = 2 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // TODO: Streams can be used in Backprop but I haven't added more than one // in my first attempt to create the code cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); // Tensor allocations for recomputation auto act_options = inputs_q.options().requires_grad(false); torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options); torch::Tensor input_lin_kv_results = 
torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options); torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr()); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr()); // Output Tensor Allocations torch::Tensor input_q_grads = torch::empty_like(inputs_q); torch::Tensor input_kv_grads = torch::empty_like(inputs_kv); torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q); torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv); torch::Tensor output_weight_grads = torch::empty_like(output_weights); // Intermediate Tensor Allocations at::Tensor output_lin_grads = torch::empty_like(matmul2_results); at::Tensor matmul2_grads = torch::empty_like(softmax_results); at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results); at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results); auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr()); auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()); auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim; auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr()); auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()); auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim; char a_layout_n{'n'}; char a_layout_t{'t'}; char b_layout_n{'n'}; char 
b_layout_t{'t'}; THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Input Linear Q Fwd THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_T, // A transpose CUBLAS_OP_N, // B wo/ transpose output_lin_q_dim, // embed_dim batches_q, // bsz x len_q embed_dim, // embed_dim static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed CUDA_R_16F, embed_dim, // lda so A has size [lda x m] -> [embed_dim x output_lin_q_dim] static_cast<const void*>(inputs_q.data_ptr()), // input Q CUDA_R_16F, embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q] static_cast<const void*>(&beta), // beta q_lin_results_ptr, // C -> emb * B CUDA_R_16F, output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q] CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Fwd THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, output_lin_kv_dim, batches_kv, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(inputs_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta), k_lin_results_ptr, CUDA_R_16F, output_lin_kv_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size) gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, // m q_seq_len, // n head_dim, // k scale, static_cast<const half*>(k_lin_results_ptr), lead_dim_kv, // lda batch_stride_kv, //strideA static_cast<const half*>(q_lin_results_ptr), lead_dim_q, // ldb batch_stride_q, //strideB beta, static_cast<half*>(attn_scores_ptr), // [attn_batches * len_q * len_k] k_seq_len, // ldc k_seq_len*q_seq_len, // stride c attn_batches); // p // need to call padding from torch interface here. 
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask, -std::numeric_limits<float>::infinity()); attn_scores.view({sequences*heads, q_seq_len, k_seq_len}); bool softmax_success = false; // run softmax dropout again but don't change the dropout mask softmax_success = dispatch_softmax_dropout_presampled<half, half, float>( reinterpret_cast<half*>(dropout_results_ptr), reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const uint8_t*>(dropout_mask.data_ptr<uint8_t>()), reinterpret_cast<const half*>(attn_scores_ptr), dropout_elems, k_seq_len, k_seq_len, attn_batches*q_seq_len, (1.0f - dropout_prob), stream); assert(softmax_success); // Matmul2 // matrix kv has size len_k * batch_size * (2 * heads * head_dim) // dropout results [bsz*heads, len_q, len_k] // matmul2_results is [len_q x attn_batches x head_dim] gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, // m q_seq_len, // n k_seq_len, // k alpha, static_cast<const half*>(v_lin_results_ptr), // A_i [head_dimxk_seq_len] lead_dim_kv, // attn_batches * 2 *head_dim batch_stride_kv, // stride = 2 * head_dim static_cast<const half*>(dropout_results.data_ptr()), // B_i [k_seq_len x q_seq_len] k_seq_len, // lead_dim k_seq_len*q_seq_len, // stride beta, static_cast<half*>(matmul2_results.data_ptr()), head_dim*attn_batches, // ldc head_dim, // stride c attn_batches); //p ////////////////////////////////////////// Recomputation Done ///////////////////////////////////// // Output Linear Dgrad // C = alpha * op(A) op(B) + BetaC // op(A): mxk, op(B): kxn C: mxn THCublasCheck(cublasGemmEx(handle, // CUBLAS_OP_N, // no transpose CUBLAS_OP_N, // no transpose embed_dim, // m batches_q, // n = bsz * len_q embed_dim, // k static_cast<const void*>(&alpha), // alpha = 1.0 static_cast<const void*>(output_weights.data_ptr()), // A mxk CUDA_R_16F, // data type embed_dim, // leading dimension of A (embed dim) (the rows) static_cast<const void*>(output_grads.data_ptr()), // B kxn 
CUDA_R_16F, // data type embed_dim, // leading dimension of B (embed dim) static_cast<const void*>(&beta), // beta static_cast<void*>(output_lin_grads.data_ptr()), // C mxn CUDA_R_16F, // data type embed_dim, // ldc CUDA_R_32F, // compute type CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Output Linear Wgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, embed_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(matmul2_results.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(output_grads.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_weight_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul2 Dgrad1 gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim_kv, batch_stride_kv, // 2 * head_dim static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, beta, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Matmul2 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, alpha, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, static_cast<const half*>(dropout_results.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, v_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>( static_cast<half*>(matmul2_grads.data_ptr()), static_cast<half* const>(matmul2_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); // Matmul1 Dgrad1 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, scale, k_lin_results_ptr, lead_dim_kv, batch_stride_kv, 
static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, q_lin_grads_ptr, lead_dim_q, batch_stride_q, attn_batches); // Matmul1 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, scale, q_lin_results_ptr, lead_dim_q, batch_stride_q, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, k_lin_grads_ptr, lead_dim_kv, batch_stride_kv, attn_batches); // Input Linear Q Dgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches_q, output_lin_q_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_q.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), CUDA_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_q_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear Q Wgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, output_lin_q_dim, batches_q, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_q.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), CUDA_R_16F, output_lin_q_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_q_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Dgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches_kv, output_lin_kv_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), CUDA_R_16F, output_lin_kv_dim, static_cast<const void*>(&beta), static_cast<void*>(input_kv_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear KV Wgrad THCublasCheck(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, 
output_lin_kv_dim, batches_kv, static_cast<const void*>(&alpha), static_cast<const void*>(inputs_kv.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(k_lin_grads_ptr), CUDA_R_16F, output_lin_kv_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_kv_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_q_grads, input_kv_grads, input_weight_q_grads, input_weight_kv_grads, output_weight_grads }; } } // end namespace cublas_gemmex } // end namespace encdec } // end namespace multihead_attn
8fa08f0ce79141227df910b36f1bbe007fd2cbf1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===- fill.cu ------------------------------------------------*--- C++ -*-===// // // Copyright 2022 ByteDance Ltd. and/or its affiliates. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //===----------------------------------------------------------------------===// #include "./fill.h" // TODO: move to common header #define DIVUP(x, y) (((x) + (y)-1) / (y)) namespace brt { namespace cuda { namespace kernel { template <typename T, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _Fill(T *output_data, T val, int32_t N) { int32_t id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output_data[id] = val; id += blockDim.x; } } } template <typename T> void Fill(hipStream_t stream, T *output, T value, size_t count) { constexpr int maxThreadsPerBlock = 256; constexpr int maxElementsPerThread = 4; int blocksPerGrid = static_cast<int>(DIVUP(count, maxThreadsPerBlock * maxElementsPerThread)); int32_t N = static_cast<int32_t>(count); hipLaunchKernelGGL(( _Fill<T, maxThreadsPerBlock, maxElementsPerThread>) , dim3(blocksPerGrid), dim3(maxThreadsPerBlock), 0, stream, output, value, N); } #define INST(T) template void Fill<T>(hipStream_t, T *, T, size_t); INST(float) INST(int64_t) INST(double) INST(__half) #undef INST } // namespace kernel } // namespace cuda } // 
namespace brt
8fa08f0ce79141227df910b36f1bbe007fd2cbf1.cu
//===- fill.cu ------------------------------------------------*--- C++ -*-===// // // Copyright 2022 ByteDance Ltd. and/or its affiliates. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //===----------------------------------------------------------------------===// #include "./fill.h" // TODO: move to common header #define DIVUP(x, y) (((x) + (y)-1) / (y)) namespace brt { namespace cuda { namespace kernel { template <typename T, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _Fill(T *output_data, T val, int32_t N) { int32_t id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output_data[id] = val; id += blockDim.x; } } } template <typename T> void Fill(cudaStream_t stream, T *output, T value, size_t count) { constexpr int maxThreadsPerBlock = 256; constexpr int maxElementsPerThread = 4; int blocksPerGrid = static_cast<int>(DIVUP(count, maxThreadsPerBlock * maxElementsPerThread)); int32_t N = static_cast<int32_t>(count); _Fill<T, maxThreadsPerBlock, maxElementsPerThread> <<<blocksPerGrid, maxThreadsPerBlock, 0, stream>>>(output, value, N); } #define INST(T) template void Fill<T>(cudaStream_t, T *, T, size_t); INST(float) INST(int64_t) INST(double) INST(__half) #undef INST } // namespace kernel } // namespace cuda } // namespace brt
ad827097bc8aa35bde8d7099ca7e5f1040c57057.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> //#include <math.h> #include "hip/hip_runtime.h" #include <hip/hip_runtime.h> #include <cusolverDn.h> #include <rocblas.h> #include <hipfft.h> #include "utilities.cuh" #define DEBUG #define PI_R 3.14159265358979323846f /*******************/ /* iDivUp FUNCTION */ /*******************/ //extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } __host__ __device__ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } /********************/ /* CUDA ERROR CHECK */ /********************/ // --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } extern "C" void gpuErrchk(hipError_t ans) { gpuAssert((ans), __FILE__, __LINE__); } /**************************/ /* CUSOLVE ERROR CHECKING */ /**************************/ #if (__CUDACC_VER__ >= 70000) static const char *_cusolverGetErrorEnum(cusolverStatus_t error) { switch (error) { case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_SUCCESS"; case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED"; case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED"; case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE"; case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH"; case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED"; case CUSOLVER_STATUS_INTERNAL_ERROR: return "CUSOLVER_STATUS_INTERNAL_ERROR"; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; } inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int 
line) { if (CUSOLVER_STATUS_SUCCESS != err) { fprintf(stderr, "CUSOLVE error in file '%s', line %d, error: %s \nterminating!\n", __FILE__, __LINE__, \ _cusolverGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); } #endif /*************************/ /* CUBLAS ERROR CHECKING */ /*************************/ static const char *_cublasGetErrorEnum(hipblasStatus_t error) { switch (error) { case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR"; case HIPBLAS_STATUS_NOT_SUPPORTED: return "HIPBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; } return "<unknown>"; } inline void __cublasSafeCall(hipblasStatus_t err, const char *file, const int line) { if (HIPBLAS_STATUS_SUCCESS != err) { fprintf(stderr, "CUBLAS error in file '%s', line %d, error: %s\nterminating!\n", __FILE__, __LINE__, \ _cublasGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cublasSafeCall(hipblasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); } /************************/ /* CUFFT ERROR CHECKING */ /************************/ // See http://stackoverflow.com/questions/16267149/cufft-error-handling static const char *_cudaGetErrorEnum(hipfftResult error) { switch (error) { case HIPFFT_SUCCESS: return "HIPFFT_SUCCESS - The cuFFT operation was successful"; case HIPFFT_INVALID_PLAN: return "HIPFFT_INVALID_PLAN - cuFFT was passed an invalid 
plan handle"; case HIPFFT_ALLOC_FAILED: return "HIPFFT_ALLOC_FAILED - cuFFT failed to allocate GPU or CPU memory"; case HIPFFT_INVALID_TYPE: return "HIPFFT_INVALID_TYPE - No longer used"; case HIPFFT_INVALID_VALUE: return "HIPFFT_INVALID_VALUE - User specified an invalid pointer or parameter"; case HIPFFT_INTERNAL_ERROR: return "HIPFFT_INTERNAL_ERROR - Driver or internal cuFFT library error"; case HIPFFT_EXEC_FAILED: return "HIPFFT_EXEC_FAILED - Failed to execute an FFT on the GPU"; case HIPFFT_SETUP_FAILED: return "HIPFFT_SETUP_FAILED - The cuFFT library failed to initialize"; case HIPFFT_INVALID_SIZE: return "HIPFFT_INVALID_SIZE - User specified an invalid transform size"; case HIPFFT_UNALIGNED_DATA: return "HIPFFT_UNALIGNED_DATA - No longer used"; case HIPFFT_INCOMPLETE_PARAMETER_LIST: return "HIPFFT_INCOMPLETE_PARAMETER_LIST - Missing parameters in call"; case HIPFFT_INVALID_DEVICE: return "HIPFFT_INVALID_DEVICE - Execution of a plan was on different GPU than plan creation"; case HIPFFT_PARSE_ERROR: return "HIPFFT_PARSE_ERROR - Internal plan database error"; case HIPFFT_NO_WORKSPACE: return "HIPFFT_NO_WORKSPACE - No workspace has been provided prior to plan execution"; case HIPFFT_NOT_IMPLEMENTED: return "HIPFFT_NOT_IMPLEMENTED - Function does not implement functionality for parameters given"; case HIPFFT_LICENSE_ERROR: return "HIPFFT_LICENSE_ERROR - Used in previous versions"; case HIPFFT_NOT_SUPPORTED: return "HIPFFT_NOT_SUPPORTED - Operation is not supported for parameters given"; } return "<unknown>"; } // --- CUFFTSAFECALL inline void cufftAssert(hipfftResult err, const char *file, const int line, bool abort = true) { if (HIPFFT_SUCCESS != err) { fprintf(stderr, "CUFFTassert: Error nr. 
%d - %s %s %d\n", err, _cudaGetErrorEnum(err), __FILE__, __LINE__); if (abort) exit(err); } } extern "C" void cufftSafeCall(hipfftResult err) { cufftAssert(err, __FILE__, __LINE__); } /***************************/ /* CUSPARSE ERROR CHECKING */ /***************************/ static const char *_cusparseGetErrorEnum(hipsparseStatus_t error) { switch (error) { case HIPSPARSE_STATUS_SUCCESS: return "HIPSPARSE_STATUS_SUCCESS"; case HIPSPARSE_STATUS_NOT_INITIALIZED: return "HIPSPARSE_STATUS_NOT_INITIALIZED"; case HIPSPARSE_STATUS_ALLOC_FAILED: return "HIPSPARSE_STATUS_ALLOC_FAILED"; case HIPSPARSE_STATUS_INVALID_VALUE: return "HIPSPARSE_STATUS_INVALID_VALUE"; case HIPSPARSE_STATUS_ARCH_MISMATCH: return "HIPSPARSE_STATUS_ARCH_MISMATCH"; case HIPSPARSE_STATUS_MAPPING_ERROR: return "HIPSPARSE_STATUS_MAPPING_ERROR"; case HIPSPARSE_STATUS_EXECUTION_FAILED: return "HIPSPARSE_STATUS_EXECUTION_FAILED"; case HIPSPARSE_STATUS_INTERNAL_ERROR: return "HIPSPARSE_STATUS_INTERNAL_ERROR"; case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case HIPSPARSE_STATUS_ZERO_PIVOT: return "HIPSPARSE_STATUS_ZERO_PIVOT"; } return "<unknown>"; } inline void __cusparseSafeCall(hipsparseStatus_t err, const char *file, const int line) { if (HIPSPARSE_STATUS_SUCCESS != err) { fprintf(stderr, "CUSPARSE error in file '%s', line %d, error %s\nterminating!\n", __FILE__, __LINE__, \ _cusparseGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cusparseSafeCall(hipsparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); } /************************/ /* REVERSE ARRAY KERNEL */ /************************/ #define BLOCKSIZE_REVERSE 256 // --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2 template <class T> __global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) { // --- Credit to the simpleTemplates CUDA sample SharedMemory<T> smem; T* s_data = 
smem.getPointer(); const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int id = threadIdx.x; const int offset = blockDim.x * (blockIdx.x + 1); // --- Load one element per thread from device memory and store it *in reversed order* into shared memory if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid]; // --- Block until all threads in the block have written their data to shared memory __syncthreads(); // --- Write the data from shared memory in forward order if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x]; } /************************/ /* REVERSE ARRAY KERNEL */ /************************/ template <class T> void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) { reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a); #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif } template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float); template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double); /********************************************************/ /* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */ /********************************************************/ #define BLOCKSIZE_CART2POL 256 template <class T> __global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta, const int N, const T a) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < N) { d_rho[tid] = a * hypot(d_x[tid], d_y[tid]); d_theta[tid] = atan2(d_y[tid], d_x[tid]); } } /*******************************************************/ /* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */ /*******************************************************/ //template <class T> //thrust::pair<T *,T *> Cartesian2Polar(const T * 
__restrict__ d_x, const T * __restrict__ d_y, const int N, const T a) { // // T *d_rho; gpuErrchk(hipMalloc((void**)&d_rho, N * sizeof(T))); // T *d_theta; gpuErrchk(hipMalloc((void**)&d_theta, N * sizeof(T))); // // Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a); //#ifdef DEBUG // gpuErrchk(hipPeekAtLastError()); // gpuErrchk(hipDeviceSynchronize()); //#endif // // return thrust::make_pair(d_rho, d_theta); //} // //template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float); //template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double); /*******************************************************/ /* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */ /*******************************************************/ //template <class T> //thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) { // // T *h_rho = (T *)malloc(N * sizeof(T)); // T *h_theta = (T *)malloc(N * sizeof(T)); // // for (int i = 0; i < N; i++) { // h_rho[i] = a * hypot(h_x[i], h_y[i]); // h_theta[i] = atan2(h_y[i], h_x[i]); // } // // return thrust::make_pair(h_rho, h_theta); //} // //template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float); //template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double); /*******************************/ /* COMPUTE L2 NORM OF A VECTOR */ /*******************************/ template<class T> T h_l2_norm(T *v1, T *v2, const int N) { T norm = (T)0; for (int i = 0; i < N; ++i) { T d = v1[i] - v2[i]; norm = norm + d * d; } return sqrt(norm); } template float h_l2_norm<float>(float *, float *, const int); template double h_l2_norm<double>(double *, double *, const int); /*******************************/ /* 
LINEAR COMBINATION FUNCTION */ /*******************************/ void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination, const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) { float alpha = 1.f; float beta = 0.f; cublasSafeCall(hipblasSgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points, d_coeff, 1, &beta, d_linear_combination, 1)); } void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination, const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) { double alpha = 1.; double beta = 0.; cublasSafeCall(hipblasDgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points, d_coeff, 1, &beta, d_linear_combination, 1)); } /******************************/ /* ADD A CONSTANT TO A VECTOR */ /******************************/ #define BLOCKSIZE_VECTORADDCONSTANT 256 template<class T> __global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) { const int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < N) d_in[tid] += scalar; } template<class T> void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) { vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N); } template void vectorAddConstant<float>(float * __restrict__, const float, const int); template void vectorAddConstant<double>(double * __restrict__, const double, const int); /*****************************************/ /* MULTIPLY A VECTOR BY A CONSTANT - GPU */ /*****************************************/ #define BLOCKSIZE_VECTORMULCONSTANT 256 template<class T> __global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T 
scalar, const int N) { const int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < N) d_in[tid] *= scalar; } template<class T> void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) { vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N); } template void vectorMulConstant<float>(float * __restrict__, const float, const int); template void vectorMulConstant<double>(double * __restrict__, const double, const int); /*****************************************/ /* MULTIPLY A VECTOR BY A CONSTANT - CPU */ /*****************************************/ template<class T> void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) { for (int i = 0; i < N; i++) h_in[i] *= scalar; } template void h_vectorMulConstant<float>(float * __restrict__, const float, const int); template void h_vectorMulConstant<double>(double * __restrict__, const double, const int); /*****************************************************/ /* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */ /*****************************************************/ template<class T> __host__ __device__ T fma2(T x, T y, T z) { return x * y + z; } template float fma2<float >(float, float, float); template double fma2<double>(double, double, double); /*******************/ /* MODULO FUNCTION */ /*******************/ __device__ int modulo(int val, int _mod) { int P; if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; } else { (!(_mod & (_mod - 1)) ? 
P = (-val)&(_mod - 1) : P = (-val) % (_mod)); if (P > 0) return _mod - P; else return 0; } } /***************************************/ /* ATOMIC ADDITION FUNCTION ON DOUBLES */ /***************************************/ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; register unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif /*********************************/ /* ATOMIC MIN FUNCTION ON FLOATS */ /*********************************/ __device__ float atomicMin(float* address, float val) { int* address_as_i = (int*)address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } /*********************/ /* DEGREE TO RADIANS */ /*********************/ double deg2rad(double deg) { return deg*PI_R / 180; } /*********************/ /* CUDA MEMORY USAGE */ /*********************/ void cudaMemoryUsage() { size_t free_byte; size_t total_byte; gpuErrchk(hipMemGetInfo(&free_byte, &total_byte)); double free_db = (double)free_byte; double total_db = (double)total_byte; double used_db = total_db - free_db; printf("GPU memory: used = %f, free = %f MB, total available = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0); }
ad827097bc8aa35bde8d7099ca7e5f1040c57057.cu
#include <stdio.h> #include <assert.h> //#include <math.h> #include "cuda_runtime.h" #include <cuda.h> #include <cusolverDn.h> #include <cublas_v2.h> #include <cufft.h> #include "utilities.cuh" #define DEBUG #define PI_R 3.14159265358979323846f /*******************/ /* iDivUp FUNCTION */ /*******************/ //extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } __host__ __device__ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } /********************/ /* CUDA ERROR CHECK */ /********************/ // --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } extern "C" void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); } /**************************/ /* CUSOLVE ERROR CHECKING */ /**************************/ #if (__CUDACC_VER__ >= 70000) static const char *_cusolverGetErrorEnum(cusolverStatus_t error) { switch (error) { case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_SUCCESS"; case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED"; case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED"; case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE"; case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH"; case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED"; case CUSOLVER_STATUS_INTERNAL_ERROR: return "CUSOLVER_STATUS_INTERNAL_ERROR"; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; } inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line) { if (CUSOLVER_STATUS_SUCCESS != err) { fprintf(stderr, 
"CUSOLVE error in file '%s', line %d, error: %s \nterminating!\n", __FILE__, __LINE__, \ _cusolverGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); } #endif /*************************/ /* CUBLAS ERROR CHECKING */ /*************************/ static const char *_cublasGetErrorEnum(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; } return "<unknown>"; } inline void __cublasSafeCall(cublasStatus_t err, const char *file, const int line) { if (CUBLAS_STATUS_SUCCESS != err) { fprintf(stderr, "CUBLAS error in file '%s', line %d, error: %s\nterminating!\n", __FILE__, __LINE__, \ _cublasGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cublasSafeCall(cublasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); } /************************/ /* CUFFT ERROR CHECKING */ /************************/ // See http://stackoverflow.com/questions/16267149/cufft-error-handling static const char *_cudaGetErrorEnum(cufftResult error) { switch (error) { case CUFFT_SUCCESS: return "CUFFT_SUCCESS - The cuFFT operation was successful"; case CUFFT_INVALID_PLAN: return "CUFFT_INVALID_PLAN - cuFFT was passed an invalid plan handle"; case CUFFT_ALLOC_FAILED: return "CUFFT_ALLOC_FAILED - cuFFT failed to 
allocate GPU or CPU memory"; case CUFFT_INVALID_TYPE: return "CUFFT_INVALID_TYPE - No longer used"; case CUFFT_INVALID_VALUE: return "CUFFT_INVALID_VALUE - User specified an invalid pointer or parameter"; case CUFFT_INTERNAL_ERROR: return "CUFFT_INTERNAL_ERROR - Driver or internal cuFFT library error"; case CUFFT_EXEC_FAILED: return "CUFFT_EXEC_FAILED - Failed to execute an FFT on the GPU"; case CUFFT_SETUP_FAILED: return "CUFFT_SETUP_FAILED - The cuFFT library failed to initialize"; case CUFFT_INVALID_SIZE: return "CUFFT_INVALID_SIZE - User specified an invalid transform size"; case CUFFT_UNALIGNED_DATA: return "CUFFT_UNALIGNED_DATA - No longer used"; case CUFFT_INCOMPLETE_PARAMETER_LIST: return "CUFFT_INCOMPLETE_PARAMETER_LIST - Missing parameters in call"; case CUFFT_INVALID_DEVICE: return "CUFFT_INVALID_DEVICE - Execution of a plan was on different GPU than plan creation"; case CUFFT_PARSE_ERROR: return "CUFFT_PARSE_ERROR - Internal plan database error"; case CUFFT_NO_WORKSPACE: return "CUFFT_NO_WORKSPACE - No workspace has been provided prior to plan execution"; case CUFFT_NOT_IMPLEMENTED: return "CUFFT_NOT_IMPLEMENTED - Function does not implement functionality for parameters given"; case CUFFT_LICENSE_ERROR: return "CUFFT_LICENSE_ERROR - Used in previous versions"; case CUFFT_NOT_SUPPORTED: return "CUFFT_NOT_SUPPORTED - Operation is not supported for parameters given"; } return "<unknown>"; } // --- CUFFTSAFECALL inline void cufftAssert(cufftResult err, const char *file, const int line, bool abort = true) { if (CUFFT_SUCCESS != err) { fprintf(stderr, "CUFFTassert: Error nr. 
%d - %s %s %d\n", err, _cudaGetErrorEnum(err), __FILE__, __LINE__); if (abort) exit(err); } } extern "C" void cufftSafeCall(cufftResult err) { cufftAssert(err, __FILE__, __LINE__); } /***************************/ /* CUSPARSE ERROR CHECKING */ /***************************/ static const char *_cusparseGetErrorEnum(cusparseStatus_t error) { switch (error) { case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_STATUS_SUCCESS"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case CUSPARSE_STATUS_ZERO_PIVOT: return "CUSPARSE_STATUS_ZERO_PIVOT"; } return "<unknown>"; } inline void __cusparseSafeCall(cusparseStatus_t err, const char *file, const int line) { if (CUSPARSE_STATUS_SUCCESS != err) { fprintf(stderr, "CUSPARSE error in file '%s', line %d, error %s\nterminating!\n", __FILE__, __LINE__, \ _cusparseGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); } /************************/ /* REVERSE ARRAY KERNEL */ /************************/ #define BLOCKSIZE_REVERSE 256 // --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2 template <class T> __global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) { // --- Credit to the simpleTemplates CUDA sample SharedMemory<T> smem; T* s_data = smem.getPointer(); const 
int tid = blockDim.x * blockIdx.x + threadIdx.x; const int id = threadIdx.x; const int offset = blockDim.x * (blockIdx.x + 1); // --- Load one element per thread from device memory and store it *in reversed order* into shared memory if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid]; // --- Block until all threads in the block have written their data to shared memory __syncthreads(); // --- Write the data from shared memory in forward order if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x]; } /************************/ /* REVERSE ARRAY KERNEL */ /************************/ template <class T> void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) { reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a); #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif } template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float); template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double); /********************************************************/ /* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */ /********************************************************/ #define BLOCKSIZE_CART2POL 256 template <class T> __global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta, const int N, const T a) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < N) { d_rho[tid] = a * hypot(d_x[tid], d_y[tid]); d_theta[tid] = atan2(d_y[tid], d_x[tid]); } } /*******************************************************/ /* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */ /*******************************************************/ //template <class T> //thrust::pair<T *,T *> Cartesian2Polar(const T * __restrict__ d_x, const 
T * __restrict__ d_y, const int N, const T a) { // // T *d_rho; gpuErrchk(cudaMalloc((void**)&d_rho, N * sizeof(T))); // T *d_theta; gpuErrchk(cudaMalloc((void**)&d_theta, N * sizeof(T))); // // Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a); //#ifdef DEBUG // gpuErrchk(cudaPeekAtLastError()); // gpuErrchk(cudaDeviceSynchronize()); //#endif // // return thrust::make_pair(d_rho, d_theta); //} // //template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float); //template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double); /*******************************************************/ /* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */ /*******************************************************/ //template <class T> //thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) { // // T *h_rho = (T *)malloc(N * sizeof(T)); // T *h_theta = (T *)malloc(N * sizeof(T)); // // for (int i = 0; i < N; i++) { // h_rho[i] = a * hypot(h_x[i], h_y[i]); // h_theta[i] = atan2(h_y[i], h_x[i]); // } // // return thrust::make_pair(h_rho, h_theta); //} // //template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float); //template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double); /*******************************/ /* COMPUTE L2 NORM OF A VECTOR */ /*******************************/ template<class T> T h_l2_norm(T *v1, T *v2, const int N) { T norm = (T)0; for (int i = 0; i < N; ++i) { T d = v1[i] - v2[i]; norm = norm + d * d; } return sqrt(norm); } template float h_l2_norm<float>(float *, float *, const int); template double h_l2_norm<double>(double *, double *, const int); /*******************************/ /* LINEAR COMBINATION 
FUNCTION */ /*******************************/ void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination, const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) { float alpha = 1.f; float beta = 0.f; cublasSafeCall(cublasSgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points, d_coeff, 1, &beta, d_linear_combination, 1)); } void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination, const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) { double alpha = 1.; double beta = 0.; cublasSafeCall(cublasDgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points, d_coeff, 1, &beta, d_linear_combination, 1)); } /******************************/ /* ADD A CONSTANT TO A VECTOR */ /******************************/ #define BLOCKSIZE_VECTORADDCONSTANT 256 template<class T> __global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) { const int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < N) d_in[tid] += scalar; } template<class T> void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) { vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N); } template void vectorAddConstant<float>(float * __restrict__, const float, const int); template void vectorAddConstant<double>(double * __restrict__, const double, const int); /*****************************************/ /* MULTIPLY A VECTOR BY A CONSTANT - GPU */ /*****************************************/ #define BLOCKSIZE_VECTORMULCONSTANT 256 template<class T> __global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T scalar, const int N) { const 
int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < N) d_in[tid] *= scalar; } template<class T> void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) { vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N); } template void vectorMulConstant<float>(float * __restrict__, const float, const int); template void vectorMulConstant<double>(double * __restrict__, const double, const int); /*****************************************/ /* MULTIPLY A VECTOR BY A CONSTANT - CPU */ /*****************************************/ template<class T> void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) { for (int i = 0; i < N; i++) h_in[i] *= scalar; } template void h_vectorMulConstant<float>(float * __restrict__, const float, const int); template void h_vectorMulConstant<double>(double * __restrict__, const double, const int); /*****************************************************/ /* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */ /*****************************************************/ template<class T> __host__ __device__ T fma2(T x, T y, T z) { return x * y + z; } template float fma2<float >(float, float, float); template double fma2<double>(double, double, double); /*******************/ /* MODULO FUNCTION */ /*******************/ __device__ int modulo(int val, int _mod) { int P; if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; } else { (!(_mod & (_mod - 1)) ? 
P = (-val)&(_mod - 1) : P = (-val) % (_mod)); if (P > 0) return _mod - P; else return 0; } } /***************************************/ /* ATOMIC ADDITION FUNCTION ON DOUBLES */ /***************************************/ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; register unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif /*********************************/ /* ATOMIC MIN FUNCTION ON FLOATS */ /*********************************/ __device__ float atomicMin(float* address, float val) { int* address_as_i = (int*)address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } /*********************/ /* DEGREE TO RADIANS */ /*********************/ double deg2rad(double deg) { return deg*PI_R / 180; } /*********************/ /* CUDA MEMORY USAGE */ /*********************/ void cudaMemoryUsage() { size_t free_byte; size_t total_byte; gpuErrchk(cudaMemGetInfo(&free_byte, &total_byte)); double free_db = (double)free_byte; double total_db = (double)total_byte; double used_db = total_db - free_db; printf("GPU memory: used = %f, free = %f MB, total available = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0); }
61899ba171394eb101c94fa1fc2569f412a2b681.hip
// !!! This is a file automatically generated by hipify!!! /**************************************************************************************** All-paired shortest path implementation in CUDA (single GPU version) Optimization: 1. Unroll 2. shared memory in phase 3 Author: Chan-Wei Hu ******************************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <assert.h> #include <math.h> #include <omp.h> #define inf 1e9 static int block_dim = 32; // phase 1 kernel (done!!) __global__ void Phase_1(int *adj_mat_d, int round, int block_dim, int comp_V) { int i = threadIdx.y, j = threadIdx.x, offset = block_dim * round; extern __shared__ int shared_mem[]; shared_mem[i * block_dim + j] = adj_mat_d[(i + offset) * comp_V + (j + offset)]; __syncthreads(); #pragma unroll for(int k = 0; k < block_dim; k++){ if (shared_mem[i * block_dim + j] > shared_mem[i * block_dim + k] + shared_mem[k * block_dim + j]){ shared_mem[i * block_dim + j] = shared_mem[i * block_dim + k] + shared_mem[k * block_dim + j]; } __syncthreads(); } adj_mat_d[(i + offset) * comp_V + (j + offset)] = shared_mem[i * block_dim + j]; __syncthreads(); } // phase 2 kernel (done !!!) __global__ void Phase_2(int* adj_mat_d, int round, int block_dim, int comp_V) { int total_round = comp_V/block_dim; int i = threadIdx.y, j = threadIdx.x, // column or row? i_off = blockIdx.x == 1? block_dim * ((blockIdx.y + round + 1) % total_round): block_dim * round, j_off = blockIdx.x == 1? 
block_dim * round : block_dim * ((blockIdx.y + round + 1) % total_round); extern __shared__ int shared_mem[]; shared_mem[i * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + (j+j_off)]; shared_mem[(i + block_dim) * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + j + round*block_dim]; shared_mem[(i + 2*block_dim) * block_dim + j] = adj_mat_d[(i + round * block_dim) * comp_V + (j + j_off)]; __syncthreads(); #pragma unroll for (int k = 0; k < block_dim; k++) { if (shared_mem[i * block_dim + j] > shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]) { shared_mem[i * block_dim + j] = shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]; if (round == i_off/block_dim) shared_mem[(i + 2*block_dim) * block_dim + j] = shared_mem[i * block_dim + j]; if (round == j_off/block_dim) shared_mem[(i + block_dim) * block_dim + j] = shared_mem[i * block_dim + j]; } } adj_mat_d[(i + i_off) * comp_V + (j+j_off)] = shared_mem[i * block_dim + j]; __syncthreads(); } // Phase 3 kernel (done !!!) 
__global__ void Phase_3(int* adj_mat_d, int round, int block_dim, int comp_V){ int i = threadIdx.y, j = threadIdx.x, i_off = block_dim * blockIdx.x, j_off = block_dim * blockIdx.y; extern __shared__ int shared_mem[]; shared_mem[i * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + (j+j_off)]; shared_mem[(i + block_dim) * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + j + round*block_dim]; shared_mem[(i + 2*block_dim) * block_dim + j] = adj_mat_d[(i + round * block_dim) * comp_V + (j + j_off)]; __syncthreads(); #pragma unroll for (int k = 0; k < block_dim; k++) { if (shared_mem[i * block_dim + j] > shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]) shared_mem[i * block_dim + j] = shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]; } adj_mat_d[(i + i_off) * comp_V + (j+j_off)] = shared_mem[i * block_dim + j]; __syncthreads(); } int main(int argc, char *argv[]){ /******************************* load data *********************************/ // only two arguments are allowed assert(argc == 3); int E, V; FILE *in_fp; in_fp = fopen(argv[1], "r"); if(in_fp == NULL) printf("Failed on opening file\n"); // read in data fread(&V, sizeof(int), 1, in_fp); fread(&E, sizeof(int), 1, in_fp); // compensate V to make V % block_dim == 0 int comp_V = V + (block_dim - ((V-1) % block_dim + 1)); //allocate memory int *adj_mat; size_t sz = comp_V * comp_V * sizeof(int); hipHostMalloc((void**) &adj_mat, sz); for(int i = 0; i < comp_V; i++){ for(int j = 0; j < comp_V; j++){ if(i == j) adj_mat[i*comp_V+j] = 0; else adj_mat[i*comp_V+j] = inf; } } // load data to graph int src, dst, w; while(E--){ fread(&src, sizeof(int), 1, in_fp); fread(&dst, sizeof(int), 1, in_fp); fread(&w, sizeof(int), 1, in_fp); adj_mat[src*comp_V+dst] = w; } fclose(in_fp); /****************************************************************************/ int round = ceil((float) comp_V/block_dim); int *adj_mat_d; // 2D block dim3 
threads(block_dim, block_dim); dim3 p1(1, 1); dim3 p2(2, round-1); dim3 p3(round, round); //size_t sz = comp_V * comp_V * sizeof(int); hipSetDevice(0); // Malloc memory hipMalloc((void**) &adj_mat_d, sz); hipMemcpy(adj_mat_d, adj_mat, sz, hipMemcpyHostToDevice); for(int r = 0; r < round; r++){ hipLaunchKernelGGL(( Phase_1) , dim3(p1), dim3(threads), sizeof(int)*block_dim*block_dim , 0, adj_mat_d, r, block_dim, comp_V); // hipDeviceSynchronize(); hipLaunchKernelGGL(( Phase_2) , dim3(p2), dim3(threads), sizeof(int)*3*block_dim*block_dim , 0, adj_mat_d, r, block_dim, comp_V); // hipDeviceSynchronize(); hipLaunchKernelGGL(( Phase_3) , dim3(p3), dim3(threads), sizeof(int)*3*block_dim*block_dim , 0, adj_mat_d, r, block_dim, comp_V); } // copy back to host hipMemcpy(adj_mat, adj_mat_d, sz, hipMemcpyDeviceToHost); // output FILE *out_fp; out_fp = fopen(argv[2], "wb"); for(int i = 0; i < V; i++){ for(int j = 0; j < V; j++){ fwrite(adj_mat+i*comp_V+j, sizeof(int), 1, out_fp); } } fclose(out_fp); //free memory hipFree(adj_mat_d); hipHostFree(adj_mat); return 0; }
61899ba171394eb101c94fa1fc2569f412a2b681.cu
/**************************************************************************************** All-paired shortest path implementation in CUDA (single GPU version) Optimization: 1. Unroll 2. shared memory in phase 3 Author: Chan-Wei Hu ******************************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <assert.h> #include <math.h> #include <omp.h> #define inf 1e9 static int block_dim = 32; // phase 1 kernel (done!!) __global__ void Phase_1(int *adj_mat_d, int round, int block_dim, int comp_V) { int i = threadIdx.y, j = threadIdx.x, offset = block_dim * round; extern __shared__ int shared_mem[]; shared_mem[i * block_dim + j] = adj_mat_d[(i + offset) * comp_V + (j + offset)]; __syncthreads(); #pragma unroll for(int k = 0; k < block_dim; k++){ if (shared_mem[i * block_dim + j] > shared_mem[i * block_dim + k] + shared_mem[k * block_dim + j]){ shared_mem[i * block_dim + j] = shared_mem[i * block_dim + k] + shared_mem[k * block_dim + j]; } __syncthreads(); } adj_mat_d[(i + offset) * comp_V + (j + offset)] = shared_mem[i * block_dim + j]; __syncthreads(); } // phase 2 kernel (done !!!) __global__ void Phase_2(int* adj_mat_d, int round, int block_dim, int comp_V) { int total_round = comp_V/block_dim; int i = threadIdx.y, j = threadIdx.x, // column or row? i_off = blockIdx.x == 1? block_dim * ((blockIdx.y + round + 1) % total_round): block_dim * round, j_off = blockIdx.x == 1? 
block_dim * round : block_dim * ((blockIdx.y + round + 1) % total_round); extern __shared__ int shared_mem[]; shared_mem[i * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + (j+j_off)]; shared_mem[(i + block_dim) * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + j + round*block_dim]; shared_mem[(i + 2*block_dim) * block_dim + j] = adj_mat_d[(i + round * block_dim) * comp_V + (j + j_off)]; __syncthreads(); #pragma unroll for (int k = 0; k < block_dim; k++) { if (shared_mem[i * block_dim + j] > shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]) { shared_mem[i * block_dim + j] = shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]; if (round == i_off/block_dim) shared_mem[(i + 2*block_dim) * block_dim + j] = shared_mem[i * block_dim + j]; if (round == j_off/block_dim) shared_mem[(i + block_dim) * block_dim + j] = shared_mem[i * block_dim + j]; } } adj_mat_d[(i + i_off) * comp_V + (j+j_off)] = shared_mem[i * block_dim + j]; __syncthreads(); } // Phase 3 kernel (done !!!) 
__global__ void Phase_3(int* adj_mat_d, int round, int block_dim, int comp_V){ int i = threadIdx.y, j = threadIdx.x, i_off = block_dim * blockIdx.x, j_off = block_dim * blockIdx.y; extern __shared__ int shared_mem[]; shared_mem[i * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + (j+j_off)]; shared_mem[(i + block_dim) * block_dim + j] = adj_mat_d[(i + i_off) * comp_V + j + round*block_dim]; shared_mem[(i + 2*block_dim) * block_dim + j] = adj_mat_d[(i + round * block_dim) * comp_V + (j + j_off)]; __syncthreads(); #pragma unroll for (int k = 0; k < block_dim; k++) { if (shared_mem[i * block_dim + j] > shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]) shared_mem[i * block_dim + j] = shared_mem[(i + block_dim) * block_dim + k] + shared_mem[(k + 2*block_dim) * block_dim + j]; } adj_mat_d[(i + i_off) * comp_V + (j+j_off)] = shared_mem[i * block_dim + j]; __syncthreads(); } int main(int argc, char *argv[]){ /******************************* load data *********************************/ // only two arguments are allowed assert(argc == 3); int E, V; FILE *in_fp; in_fp = fopen(argv[1], "r"); if(in_fp == NULL) printf("Failed on opening file\n"); // read in data fread(&V, sizeof(int), 1, in_fp); fread(&E, sizeof(int), 1, in_fp); // compensate V to make V % block_dim == 0 int comp_V = V + (block_dim - ((V-1) % block_dim + 1)); //allocate memory int *adj_mat; size_t sz = comp_V * comp_V * sizeof(int); cudaMallocHost((void**) &adj_mat, sz); for(int i = 0; i < comp_V; i++){ for(int j = 0; j < comp_V; j++){ if(i == j) adj_mat[i*comp_V+j] = 0; else adj_mat[i*comp_V+j] = inf; } } // load data to graph int src, dst, w; while(E--){ fread(&src, sizeof(int), 1, in_fp); fread(&dst, sizeof(int), 1, in_fp); fread(&w, sizeof(int), 1, in_fp); adj_mat[src*comp_V+dst] = w; } fclose(in_fp); /****************************************************************************/ int round = ceil((float) comp_V/block_dim); int *adj_mat_d; // 2D block dim3 
threads(block_dim, block_dim); dim3 p1(1, 1); dim3 p2(2, round-1); dim3 p3(round, round); //size_t sz = comp_V * comp_V * sizeof(int); cudaSetDevice(0); // Malloc memory cudaMalloc((void**) &adj_mat_d, sz); cudaMemcpy(adj_mat_d, adj_mat, sz, cudaMemcpyHostToDevice); for(int r = 0; r < round; r++){ Phase_1 <<<p1, threads, sizeof(int)*block_dim*block_dim >>>(adj_mat_d, r, block_dim, comp_V); // cudaDeviceSynchronize(); Phase_2 <<<p2, threads, sizeof(int)*3*block_dim*block_dim >>>(adj_mat_d, r, block_dim, comp_V); // cudaDeviceSynchronize(); Phase_3 <<<p3, threads, sizeof(int)*3*block_dim*block_dim >>>(adj_mat_d, r, block_dim, comp_V); } // copy back to host cudaMemcpy(adj_mat, adj_mat_d, sz, cudaMemcpyDeviceToHost); // output FILE *out_fp; out_fp = fopen(argv[2], "wb"); for(int i = 0; i < V; i++){ for(int j = 0; j < V; j++){ fwrite(adj_mat+i*comp_V+j, sizeof(int), 1, out_fp); } } fclose(out_fp); //free memory cudaFree(adj_mat_d); cudaFreeHost(adj_mat); return 0; }
d104cfae61c5842d35ec42d45d2d99ee443e1b31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdio.h> #include<stdlib.h> #include <math.h> #include <Windows.h> #include <time.h> #include <assert.h> // 1 prac 2 // 2 prac 3 #define prac 2 #define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}} typedef float TIMER_T; #define USE_CPU_TIMER 1 #define USE_GPU_TIMER 1 #if USE_CPU_TIMER == 1 __int64 start, freq, end; #define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); } #define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); } #else #define CHECK_TIME_START #define CHECK_TIME_END(a) #endif #if USE_GPU_TIMER == 1 hipEvent_t cuda_timer_start, cuda_timer_stop; #define CUDA_STREAM_0 (0) void create_device_timer() { CUDA_CALL(hipEventCreate(&cuda_timer_start)); CUDA_CALL(hipEventCreate(&cuda_timer_stop)); } void destroy_device_timer() { CUDA_CALL(hipEventDestroy(cuda_timer_start)); CUDA_CALL(hipEventDestroy(cuda_timer_stop)); } inline void start_device_timer() { hipEventRecord(cuda_timer_start, CUDA_STREAM_0); } inline TIMER_T stop_device_timer() { TIMER_T ms; hipEventRecord(cuda_timer_stop, CUDA_STREAM_0); hipEventSynchronize(cuda_timer_stop); hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop); return ms; } #define CHECK_TIME_INIT_GPU() { create_device_timer(); } #define CHECK_TIME_START_GPU() { start_device_timer(); } #define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); } #define CHECK_TIME_DEST_GPU() { destroy_device_timer(); } #else #define CHECK_TIME_INIT_GPU() #define CHECK_TIME_START_GPU() #define CHECK_TIME_END_GPU(a) #define CHECK_TIME_DEST_GPU() #endif TIMER_T compute_time = 0; TIMER_T device_time = 0; #if prac==1 
typedef struct { int width; int height; float *elements; } Array; #define MAX_N_ELEMENTS (1 << 20) void generate_random_float_array(float *array, int n) { int i; for (i = 0; i < n; i++) { array[i] = 3.1415926f*((float)rand() / RAND_MAX); } } void combine_two_arrays(float *x, float *y, float *z, int n) { int i; for (i = 0; i < n; i++) { z[i] = 1.0f / (sin(x[i])*cos(y[i]) + cos(x[i])*sin(y[i])); } } __global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int id = gridDim.x*blockDim.x*row + col; C.elements[id] = 1.0f / (sin(A.elements[id])*cos(B.elements[id]) + cos(A.elements[id])*sin(B.elements[id])); int temp = 0; for(int i = 0; i < 65536; i++) temp++; } hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C); int BLOCK_SIZE = 256; int main() { int n_elements; srand((unsigned int)time(NULL)); n_elements = MAX_N_ELEMENTS; Array A, B, C, G; A.width = B.width = C.width = G.width = 1024; A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024; A.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); B.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); C.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); G.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); generate_random_float_array(A.elements, MAX_N_ELEMENTS); generate_random_float_array(B.elements, MAX_N_ELEMENTS); CHECK_TIME_START; combine_two_arrays(A.elements, B.elements, C.elements, n_elements); CHECK_TIME_END(compute_time); printf("***CPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time); hipError_t cudaStatus = combine_two_arrays_GPU(A, B, G); if (cudaStatus != hipSuccess) { fprintf(stderr, "combine_two_arrays_GPU failed!"); return 1; } printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler 
to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) { // . CHECK_TIME_INIT_GPU() hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; }///////////// if(cu..... ==CUDA_CALL Array d_A, d_B, d_C; size_t size; d_A.width = A.width; d_A.height = A.height; size = A.width * A.height * sizeof(float); CUDA_CALL(hipMalloc(&d_A.elements, size)) CUDA_CALL(hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice)) d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); CUDA_CALL(hipMalloc(&d_B.elements, size)) CUDA_CALL(hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice)) d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); CUDA_CALL(hipMalloc(&d_C.elements, size)) // Assume that width and height are multiples of BLOCK SIZE. dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(A.width / dimBlock.x); CHECK_TIME_START_GPU() CombineTwoArrraysKernel << < dimGrid, dimBlock >> > (d_A, d_B, d_C); CHECK_TIME_END_GPU(device_time) // CUDA_CALL(hipGetLastError()) // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
CUDA_CALL(hipDeviceSynchronize()) CUDA_CALL(hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost)) Error: hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); CHECK_TIME_DEST_GPU() return cudaStatus; } #endif #if prac==2 int NX = 1024, NY = 1024; int n = NX * NY; #define BLOCK_SIZE 64 const int ELEM_PER_VECTOR = 64; float(*pVecX)[ELEM_PER_VECTOR], (*pVecY)[ELEM_PER_VECTOR], (*pVecY_G)[ELEM_PER_VECTOR]; float(*pMatA)[ELEM_PER_VECTOR]; void init_MatVec(void) { srand((unsigned)time(NULL)); FILE* fp = fopen("gen.bin", "rb"); fread(&n, sizeof(float), 1, fp); pVecX = new float[n][ELEM_PER_VECTOR]; pVecY = new float[n][ELEM_PER_VECTOR]; pVecY_G = new float[n][ELEM_PER_VECTOR]; pMatA = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR]; fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp); fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp); fclose(fp); } void Mat_Vec_Multiply() { for(int k = 0; k < n; k++){ for(int i = 0; i < ELEM_PER_VECTOR; i++){ pVecY[k][i] = 0.0f; for(int j = 0; j < ELEM_PER_VECTOR; j++) pVecY[k][i] += pMatA[i][j] * pVecX[k][j]; } } } __global__ void Mat_Vec_Multiply_Kernel(float (*X)[ELEM_PER_VECTOR], float (*Y)[ELEM_PER_VECTOR], float (*A)[ELEM_PER_VECTOR]) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int id = gridDim.x*blockDim.x*row + col; for(int i = 0; i < ELEM_PER_VECTOR; i++){ Y[id][i] = 0.0f; for(int j = 0; j < ELEM_PER_VECTOR; j++) Y[id][i] += A[i][j] * X[id][j]; } } void Mat_Vec_Multiply_GPU() { CHECK_TIME_INIT_GPU() // Choose which GPU to run on, change this on a multi-GPU system. 
CUDA_CALL(hipSetDevice(0)) float (*d_X)[ELEM_PER_VECTOR], (*d_Y)[ELEM_PER_VECTOR], (*d_A)[ELEM_PER_VECTOR]; size_t size = n * ELEM_PER_VECTOR * sizeof(float); size_t size_a = ELEM_PER_VECTOR * ELEM_PER_VECTOR * sizeof(float); CUDA_CALL(hipMalloc(&d_X,size)) CUDA_CALL(hipMemcpy(d_X, pVecX, size, hipMemcpyHostToDevice)) CUDA_CALL(hipMalloc(&d_Y,size)) CUDA_CALL(hipMalloc(&d_A, size_a)) CUDA_CALL(hipMemcpy(d_A, pMatA, size_a, hipMemcpyHostToDevice)) dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(NX / dimBlock.x, NY / dimBlock.y); CHECK_TIME_START_GPU() Mat_Vec_Multiply_Kernel << < dimGrid, dimBlock >> > (d_X, d_Y, d_A); CHECK_TIME_END_GPU(device_time) CUDA_CALL(hipGetLastError()) CUDA_CALL(hipDeviceSynchronize()) CUDA_CALL(hipMemcpy(pVecY_G, d_Y, size, hipMemcpyDeviceToHost)) CHECK_TIME_DEST_GPU() } void init_data(int size) { srand((unsigned)time(NULL)); FILE *fp = fopen("gen.bin", "wb"); fwrite(&size, sizeof(int), 1, fp); int i, j; float x; for (i = 0; i < size; i++) { for (j = 0; j < ELEM_PER_VECTOR; j++) { x = 2.0f*((float)rand() / RAND_MAX) - 1.0f; fwrite(&x, sizeof(float), 1, fp); } } for (i = 0; i < ELEM_PER_VECTOR ; i++) { for (j = 0; j < ELEM_PER_VECTOR; j++) { x = 2.0f*((float)rand() / RAND_MAX) - 1.0f; fwrite(&x, sizeof(float), 1, fp); } } fclose(fp); return; } int main() { init_data(NX * NY); init_MatVec(); printf("n = %d file open ok.\n", n); CHECK_TIME_START; Mat_Vec_Multiply(); CHECK_TIME_END(compute_time); printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0][0], compute_time); Mat_Vec_Multiply_GPU(); printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0][0], device_time); for (int i = 0 ; i < NX * NY ; i++){ for (int j = 0 ; j < ELEM_PER_VECTOR ; j++){ if (!(fabs(pVecY[i][j] - pVecY_G[i][j]) < 0.00001f)){ printf("i : %d j : %d not correct\n", i, j); return -1; } } } printf("FINISHED!!\n"); return 0; } #endif
d104cfae61c5842d35ec42d45d2d99ee443e1b31.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdio.h> #include<stdlib.h> #include <math.h> #include <Windows.h> #include <time.h> #include <assert.h> // 1 prac 2 // 2 prac 3 #define prac 2 #define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}} typedef float TIMER_T; #define USE_CPU_TIMER 1 #define USE_GPU_TIMER 1 #if USE_CPU_TIMER == 1 __int64 start, freq, end; #define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); } #define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); } #else #define CHECK_TIME_START #define CHECK_TIME_END(a) #endif #if USE_GPU_TIMER == 1 cudaEvent_t cuda_timer_start, cuda_timer_stop; #define CUDA_STREAM_0 (0) void create_device_timer() { CUDA_CALL(cudaEventCreate(&cuda_timer_start)); CUDA_CALL(cudaEventCreate(&cuda_timer_stop)); } void destroy_device_timer() { CUDA_CALL(cudaEventDestroy(cuda_timer_start)); CUDA_CALL(cudaEventDestroy(cuda_timer_stop)); } inline void start_device_timer() { cudaEventRecord(cuda_timer_start, CUDA_STREAM_0); } inline TIMER_T stop_device_timer() { TIMER_T ms; cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0); cudaEventSynchronize(cuda_timer_stop); cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop); return ms; } #define CHECK_TIME_INIT_GPU() { create_device_timer(); } #define CHECK_TIME_START_GPU() { start_device_timer(); } #define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); } #define CHECK_TIME_DEST_GPU() { destroy_device_timer(); } #else #define CHECK_TIME_INIT_GPU() #define CHECK_TIME_START_GPU() #define CHECK_TIME_END_GPU(a) #define CHECK_TIME_DEST_GPU() #endif TIMER_T compute_time = 0; TIMER_T device_time = 0; #if prac==1 typedef struct { int width; int height; float 
*elements; } Array; #define MAX_N_ELEMENTS (1 << 20) void generate_random_float_array(float *array, int n) { int i; for (i = 0; i < n; i++) { array[i] = 3.1415926f*((float)rand() / RAND_MAX); } } void combine_two_arrays(float *x, float *y, float *z, int n) { int i; for (i = 0; i < n; i++) { z[i] = 1.0f / (sin(x[i])*cos(y[i]) + cos(x[i])*sin(y[i])); } } __global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int id = gridDim.x*blockDim.x*row + col; C.elements[id] = 1.0f / (sin(A.elements[id])*cos(B.elements[id]) + cos(A.elements[id])*sin(B.elements[id])); int temp = 0; for(int i = 0; i < 65536; i++) temp++; } cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C); int BLOCK_SIZE = 256; int main() { int n_elements; srand((unsigned int)time(NULL)); n_elements = MAX_N_ELEMENTS; Array A, B, C, G; A.width = B.width = C.width = G.width = 1024; A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024; A.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); B.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); C.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); G.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS); generate_random_float_array(A.elements, MAX_N_ELEMENTS); generate_random_float_array(B.elements, MAX_N_ELEMENTS); CHECK_TIME_START; combine_two_arrays(A.elements, B.elements, C.elements, n_elements); CHECK_TIME_END(compute_time); printf("***CPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time); cudaError_t cudaStatus = combine_two_arrays_GPU(A, B, G); if (cudaStatus != cudaSuccess) { fprintf(stderr, "combine_two_arrays_GPU failed!"); return 1; } printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) { //아래 함수들을 사용하여 어떻게 하면 가급적 정확한 시간을 측정할 수 있을지 생각해볼 것. CHECK_TIME_INIT_GPU() cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; }///////////// if(cu..... ==CUDA_CALL Array d_A, d_B, d_C; size_t size; d_A.width = A.width; d_A.height = A.height; size = A.width * A.height * sizeof(float); CUDA_CALL(cudaMalloc(&d_A.elements, size)) CUDA_CALL(cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice)) d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); CUDA_CALL(cudaMalloc(&d_B.elements, size)) CUDA_CALL(cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice)) d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); CUDA_CALL(cudaMalloc(&d_C.elements, size)) // Assume that width and height are multiples of BLOCK SIZE. dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(A.width / dimBlock.x); CHECK_TIME_START_GPU() CombineTwoArrraysKernel << < dimGrid, dimBlock >> > (d_A, d_B, d_C); CHECK_TIME_END_GPU(device_time) // CUDA_CALL(cudaGetLastError()) // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
CUDA_CALL(cudaDeviceSynchronize()) CUDA_CALL(cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost)) Error: cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); CHECK_TIME_DEST_GPU() return cudaStatus; } #endif #if prac==2 int NX = 1024, NY = 1024; int n = NX * NY; #define BLOCK_SIZE 64 const int ELEM_PER_VECTOR = 64; float(*pVecX)[ELEM_PER_VECTOR], (*pVecY)[ELEM_PER_VECTOR], (*pVecY_G)[ELEM_PER_VECTOR]; float(*pMatA)[ELEM_PER_VECTOR]; void init_MatVec(void) { srand((unsigned)time(NULL)); FILE* fp = fopen("gen.bin", "rb"); fread(&n, sizeof(float), 1, fp); pVecX = new float[n][ELEM_PER_VECTOR]; pVecY = new float[n][ELEM_PER_VECTOR]; pVecY_G = new float[n][ELEM_PER_VECTOR]; pMatA = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR]; fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp); fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp); fclose(fp); } void Mat_Vec_Multiply() { for(int k = 0; k < n; k++){ for(int i = 0; i < ELEM_PER_VECTOR; i++){ pVecY[k][i] = 0.0f; for(int j = 0; j < ELEM_PER_VECTOR; j++) pVecY[k][i] += pMatA[i][j] * pVecX[k][j]; } } } __global__ void Mat_Vec_Multiply_Kernel(float (*X)[ELEM_PER_VECTOR], float (*Y)[ELEM_PER_VECTOR], float (*A)[ELEM_PER_VECTOR]) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int id = gridDim.x*blockDim.x*row + col; for(int i = 0; i < ELEM_PER_VECTOR; i++){ Y[id][i] = 0.0f; for(int j = 0; j < ELEM_PER_VECTOR; j++) Y[id][i] += A[i][j] * X[id][j]; } } void Mat_Vec_Multiply_GPU() { CHECK_TIME_INIT_GPU() // Choose which GPU to run on, change this on a multi-GPU system. 
CUDA_CALL(cudaSetDevice(0)) float (*d_X)[ELEM_PER_VECTOR], (*d_Y)[ELEM_PER_VECTOR], (*d_A)[ELEM_PER_VECTOR]; size_t size = n * ELEM_PER_VECTOR * sizeof(float); size_t size_a = ELEM_PER_VECTOR * ELEM_PER_VECTOR * sizeof(float); CUDA_CALL(cudaMalloc(&d_X,size)) CUDA_CALL(cudaMemcpy(d_X, pVecX, size, cudaMemcpyHostToDevice)) CUDA_CALL(cudaMalloc(&d_Y,size)) CUDA_CALL(cudaMalloc(&d_A, size_a)) CUDA_CALL(cudaMemcpy(d_A, pMatA, size_a, cudaMemcpyHostToDevice)) dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(NX / dimBlock.x, NY / dimBlock.y); CHECK_TIME_START_GPU() Mat_Vec_Multiply_Kernel << < dimGrid, dimBlock >> > (d_X, d_Y, d_A); CHECK_TIME_END_GPU(device_time) CUDA_CALL(cudaGetLastError()) CUDA_CALL(cudaDeviceSynchronize()) CUDA_CALL(cudaMemcpy(pVecY_G, d_Y, size, cudaMemcpyDeviceToHost)) CHECK_TIME_DEST_GPU() } void init_data(int size) { srand((unsigned)time(NULL)); FILE *fp = fopen("gen.bin", "wb"); fwrite(&size, sizeof(int), 1, fp); int i, j; float x; for (i = 0; i < size; i++) { for (j = 0; j < ELEM_PER_VECTOR; j++) { x = 2.0f*((float)rand() / RAND_MAX) - 1.0f; fwrite(&x, sizeof(float), 1, fp); } } for (i = 0; i < ELEM_PER_VECTOR ; i++) { for (j = 0; j < ELEM_PER_VECTOR; j++) { x = 2.0f*((float)rand() / RAND_MAX) - 1.0f; fwrite(&x, sizeof(float), 1, fp); } } fclose(fp); return; } int main() { init_data(NX * NY); init_MatVec(); printf("n = %d file open ok.\n", n); CHECK_TIME_START; Mat_Vec_Multiply(); CHECK_TIME_END(compute_time); printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0][0], compute_time); Mat_Vec_Multiply_GPU(); printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0][0], device_time); for (int i = 0 ; i < NX * NY ; i++){ for (int j = 0 ; j < ELEM_PER_VECTOR ; j++){ if (!(fabs(pVecY[i][j] - pVecY_G[i][j]) < 0.00001f)){ printf("i : %d j : %d not correct\n", i, j); return -1; } } } printf("FINISHED!!\n"); return 0; } #endif
2b5e59e9b3296f46e85aabc7af3123a66b47a281.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "disjoint.cuh" #define PI 3.14159265 // 2** 31 __device__ const uint32_t REPLUSIVE_INIT = 2147483648; __device__ const int direction[8][2]={1,0, 1,1, 0,1, -1,1, -1,0, -1,-1, 0,-1, 1,-1}; #define CUDA_1D_KERNEL_LOOP(index, nthreads) \ for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; \ index += blockDim.x * gridDim.x) __global__ void find_parents( const int nthreads, const int height, const int width, const float theta_a, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> input_angles, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> roots) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; float curr_angle = input_angles[curr_h][curr_w]; int pos=(curr_angle + PI/8)/(PI/4); if(pos >= 8) pos-=8; int next_h = curr_h + direction[pos][0]; int next_w = curr_w + direction[pos][1]; if (next_h >= height || next_h < 0 || next_w >= width || next_w < 0) { parents[0][curr_h][curr_w] = curr_h; parents[1][curr_h][curr_w] = curr_w; roots[curr_h][curr_w] = 1; return; } float next_angle = input_angles[next_h][next_w]; float angle_diff = abs(curr_angle - next_angle); angle_diff = min(angle_diff, 2*PI - angle_diff); if (angle_diff > theta_a * PI / 180) { parents[0][curr_h][curr_w] = curr_h; parents[1][curr_h][curr_w] = curr_w; roots[curr_h][curr_w] = 1; return; } parents[0][curr_h][curr_w] = next_h; parents[1][curr_h][curr_w] = next_w; } } __global__ void get_super_BPDs_step1( const int nthreads, const int height, const int width, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; int next_h = parents[0][curr_h][curr_w]; int next_w 
= parents[1][curr_h][curr_w]; int next_index = next_h*width + next_w; UNION(super_BPDs, index, next_index); } } __global__ void get_super_BPDs_step2( const int nthreads, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { super_BPDs[index] = FIND(super_BPDs, index) + 1; } } __global__ void merge_nearby_root_pixels( const int nthreads, const int height, const int width, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> roots, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; if (!roots[curr_h][curr_w]) return; for (int delta_h=0; delta_h<=min(3, height-1-curr_h); delta_h++) { for (int delta_w=-min(3, curr_w); delta_w<=min(3, width-1-curr_w); delta_w++) { int next_h = curr_h + delta_h; int next_w = curr_w + delta_w; if (roots[next_h][next_w]) { int next_index = next_h*width + next_w; UNION(super_BPDs, index, next_index); } } } } } __global__ void find_bnd_angle_diff( const int nthreads, const int height, const int width, const int num_superpixels, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> input_angles, int* super_BPDs, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> unique_super_BPDs_inverse, float* bnd_angle_diff, int* bnd_pair_nums) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; int curr_index = curr_h*width + curr_w; // right and bottom point int delta_h[2] = {0,1}; int delta_w[2] = {1,0}; for (int i=0; i<2; i++) { int next_h = curr_h + delta_h[i]; int next_w = curr_w + delta_w[i]; if (next_w >= width || next_h >= height) continue; int next_index = next_h*width + next_w; if (super_BPDs[curr_index] != super_BPDs[next_index]) { int curr_position = unique_super_BPDs_inverse[curr_h][curr_w]; int next_position = unique_super_BPDs_inverse[next_h][next_w]; int min_position = min(curr_position, next_position); int max_position = 
max(curr_position, next_position); atomicAdd(bnd_pair_nums + min_position*num_superpixels + max_position, 1); // forward 3 steps respectively, then calculate angle diff int steps = 3; while (steps--) { int curr_parent_h = parents[0][curr_h][curr_w]; int curr_parent_w = parents[1][curr_h][curr_w]; curr_h = curr_parent_h; curr_w = curr_parent_w; int next_parent_h = parents[0][next_h][next_w]; int next_parent_w = parents[1][next_h][next_w]; next_h = next_parent_h; next_w = next_parent_w; } float curr_angle = input_angles[curr_h][curr_w]; float next_angle = input_angles[next_h][next_w]; float angle_diff = abs(curr_angle - next_angle); angle_diff = min(angle_diff, 2*PI - angle_diff); atomicAdd(bnd_angle_diff + min_position*num_superpixels + max_position, angle_diff); } } } } __global__ void classify_edges( const int nthreads, const int num_superpixels, const int nums, const float S_o, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> bnd_angle_diff, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> bnd_pair_nums, torch::PackedTensorAccessor32<bool,2,torch::RestrictPtrTraits> select_matrix, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> edge_h, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> edge_w, int* replusive_matrix) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % num_superpixels; int curr_h = index / num_superpixels; if (bnd_pair_nums[curr_h][curr_w] == 0) return; float avg_angle_diff = bnd_angle_diff[curr_h][curr_w] / bnd_pair_nums[curr_h][curr_w]; bnd_angle_diff[curr_h][curr_w] = avg_angle_diff; if (avg_angle_diff > PI - S_o * PI / 180) { int inter_h = curr_w / 32; int inter_w = curr_w % 32; atomicOr(replusive_matrix + curr_h*nums + inter_h, REPLUSIVE_INIT >> inter_w); return; } select_matrix[curr_h][curr_w] = 1; edge_h[curr_h][curr_w] = curr_h; edge_w[curr_h][curr_w] = curr_w; } } __global__ void final_step( const int nthreads, torch::PackedTensorAccessor32<uint8_t,1,torch::RestrictPtrTraits> 
connect_marks, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> edge_h, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> edge_w, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> unique_super_BPDs, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { if (connect_marks[index]) { int index_h = unique_super_BPDs[edge_h[index]] - 1; int index_w = unique_super_BPDs[edge_w[index]] - 1; UNION(super_BPDs, index_h, index_w); } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, \ torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> \ bpd_cuda(const torch::Tensor input_angles, const int height, const int width, const float theta_a, const float S_o) { const int kThreadsPerBlock = 1024; const int blocks = (height*width + kThreadsPerBlock - 1) / kThreadsPerBlock; torch::Tensor parents = torch::zeros({2, height, width}, torch::CUDA(torch::kInt32)); torch::Tensor roots = torch::zeros({height, width}, torch::CUDA(torch::kInt32)); // get parents and rootshipLaunchKernelGGL(( find_parents), dim3(blocks), dim3(kThreadsPerBlock), 0, 0, height*width, height, width, theta_a, input_angles.packed_accessor32<float,2,torch::RestrictPtrTraits>(), parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), roots.packed_accessor32<int,2,torch::RestrictPtrTraits>() ); // get super-BPDs, index from 0 ~ height*width - 1, init label from 1 ~ height*width torch::Tensor super_BPDs = torch::arange(1, height*width + 1, torch::CUDA(torch::kInt32));hipLaunchKernelGGL(( get_super_BPDs_step1), dim3(blocks), dim3(kThreadsPerBlock), 0, 0, height*width, height, width, parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); hipLaunchKernelGGL(( get_super_BPDs_step2), dim3(blocks), dim3(kThreadsPerBlock), 0, 0, height*width, super_BPDs.contiguous().data_ptr<int>() ); auto super_BPDs_before_dilation = super_BPDs.clone(); super_BPDs_before_dilation = 
super_BPDs_before_dilation.reshape({height, width}); // merge nearby root pixelshipLaunchKernelGGL(( merge_nearby_root_pixels), dim3(blocks), dim3(kThreadsPerBlock), 0, 0, height*width, height, width, roots.packed_accessor32<int,2,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); hipLaunchKernelGGL(( get_super_BPDs_step2), dim3(blocks), dim3(kThreadsPerBlock), 0, 0, height*width, super_BPDs.contiguous().data_ptr<int>() ); auto super_BPDs_after_dilation = super_BPDs.clone(); super_BPDs_after_dilation = super_BPDs_after_dilation.reshape({height, width}); // construct RAG auto unique_results = torch::_unique2(super_BPDs, true, true, true); auto unique_super_BPDs = std::get<0>(unique_results); auto unique_super_BPDs_inverse = std::get<1>(unique_results); unique_super_BPDs_inverse = unique_super_BPDs_inverse.to(torch::kInt32); unique_super_BPDs_inverse = unique_super_BPDs_inverse.reshape({height, width}); auto unique_super_BPDs_counts = std::get<2>(unique_results); unique_super_BPDs_counts = unique_super_BPDs_counts.to(torch::kInt32); int num_superpixels = unique_super_BPDs.numel(); torch::Tensor bnd_angle_diff = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kFloat32)); torch::Tensor bnd_pair_nums = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); hipLaunchKernelGGL(( find_bnd_angle_diff), dim3(blocks), dim3(kThreadsPerBlock), 0, 0, height*width, height, width, num_superpixels, input_angles.packed_accessor32<float,2,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>(), parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), unique_super_BPDs_inverse.packed_accessor32<int,2,torch::RestrictPtrTraits>(), bnd_angle_diff.contiguous().data_ptr<float>(), bnd_pair_nums.contiguous().data_ptr<int>() ); // classify edges (replusive, large, small, tiny) torch::Tensor select_matrix = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kBool)); torch::Tensor edge_h = 
torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); torch::Tensor edge_w = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); const int nums = (num_superpixels + 32 -1) / 32; torch::Tensor replusive_matrix = torch::zeros({num_superpixels, nums}, torch::CUDA(torch::kInt32)); const int blocks2 = (num_superpixels*num_superpixels + kThreadsPerBlock - 1) / kThreadsPerBlock; hipLaunchKernelGGL(( classify_edges), dim3(blocks2), dim3(kThreadsPerBlock), 0, 0, num_superpixels*num_superpixels, num_superpixels, nums, S_o, bnd_angle_diff.packed_accessor32<float,2,torch::RestrictPtrTraits>(), bnd_pair_nums.packed_accessor32<int,2,torch::RestrictPtrTraits>(), select_matrix.packed_accessor32<bool,2,torch::RestrictPtrTraits>(), edge_h.packed_accessor32<int,2,torch::RestrictPtrTraits>(), edge_w.packed_accessor32<int,2,torch::RestrictPtrTraits>(), replusive_matrix.contiguous().data_ptr<int>() ); bnd_angle_diff = bnd_angle_diff.masked_select(select_matrix); edge_h = edge_h.masked_select(select_matrix); edge_w = edge_w.masked_select(select_matrix); // diff small to large, sim large to small auto sort_index = bnd_angle_diff.argsort(); auto sorted_bnd_angle_diff = bnd_angle_diff.index({sort_index}); auto sorted_edge_h = edge_h.index({sort_index}); auto sorted_edge_w = edge_w.index({sort_index}); // connect edges sorted_bnd_angle_diff = sorted_bnd_angle_diff.to(torch::kCPU); sorted_edge_h = sorted_edge_h.to(torch::kCPU); sorted_edge_w = sorted_edge_w.to(torch::kCPU); replusive_matrix = replusive_matrix.to(torch::kCPU); unique_super_BPDs_counts = unique_super_BPDs_counts.to(torch::kCPU); unique_super_BPDs = unique_super_BPDs.to(torch::kCPU); return std::make_tuple(unique_super_BPDs_counts, sorted_edge_h, \ sorted_edge_w, sorted_bnd_angle_diff, replusive_matrix, unique_super_BPDs, \ roots, super_BPDs_before_dilation, super_BPDs_after_dilation, super_BPDs); } torch::Tensor bpd_cuda_final_step(const int height, const int width, 
torch::Tensor connect_marks, torch::Tensor edge_h, \ torch::Tensor edge_w, torch::Tensor unique_super_BPDs, torch::Tensor super_BPDs) { connect_marks = connect_marks.to(torch::kCUDA); edge_h = edge_h.to(torch::kCUDA); edge_w = edge_w.to(torch::kCUDA); unique_super_BPDs = unique_super_BPDs.to(torch::kCUDA); super_BPDs = super_BPDs.to(torch::kCUDA); const int num_edges = edge_h.numel(); const int kThreadsPerBlock = 1024; const int blocks = (num_edges + kThreadsPerBlock - 1) / kThreadsPerBlock; hipLaunchKernelGGL(( final_step), dim3(blocks), dim3(kThreadsPerBlock), 0, 0, num_edges, connect_marks.packed_accessor32<uint8_t,1,torch::RestrictPtrTraits>(), edge_h.packed_accessor32<int,1,torch::RestrictPtrTraits>(), edge_w.packed_accessor32<int,1,torch::RestrictPtrTraits>(), unique_super_BPDs.packed_accessor32<int,1,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); const int blocks2 = (height*width + kThreadsPerBlock - 1) / kThreadsPerBlock; hipLaunchKernelGGL(( get_super_BPDs_step2), dim3(blocks2), dim3(kThreadsPerBlock), 0, 0, height*width, super_BPDs.contiguous().data_ptr<int>() ); super_BPDs = super_BPDs.reshape({height, width}); return super_BPDs; }
2b5e59e9b3296f46e85aabc7af3123a66b47a281.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include "disjoint.cuh" #define PI 3.14159265 // 2** 31 __device__ const uint32_t REPLUSIVE_INIT = 2147483648; __device__ const int direction[8][2]={1,0, 1,1, 0,1, -1,1, -1,0, -1,-1, 0,-1, 1,-1}; #define CUDA_1D_KERNEL_LOOP(index, nthreads) \ for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; \ index += blockDim.x * gridDim.x) __global__ void find_parents( const int nthreads, const int height, const int width, const float theta_a, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> input_angles, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> roots) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; float curr_angle = input_angles[curr_h][curr_w]; int pos=(curr_angle + PI/8)/(PI/4); if(pos >= 8) pos-=8; int next_h = curr_h + direction[pos][0]; int next_w = curr_w + direction[pos][1]; if (next_h >= height || next_h < 0 || next_w >= width || next_w < 0) { parents[0][curr_h][curr_w] = curr_h; parents[1][curr_h][curr_w] = curr_w; roots[curr_h][curr_w] = 1; return; } float next_angle = input_angles[next_h][next_w]; float angle_diff = abs(curr_angle - next_angle); angle_diff = min(angle_diff, 2*PI - angle_diff); if (angle_diff > theta_a * PI / 180) { parents[0][curr_h][curr_w] = curr_h; parents[1][curr_h][curr_w] = curr_w; roots[curr_h][curr_w] = 1; return; } parents[0][curr_h][curr_w] = next_h; parents[1][curr_h][curr_w] = next_w; } } __global__ void get_super_BPDs_step1( const int nthreads, const int height, const int width, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; int next_h = parents[0][curr_h][curr_w]; int next_w = parents[1][curr_h][curr_w]; int next_index = next_h*width + next_w; 
UNION(super_BPDs, index, next_index); } } __global__ void get_super_BPDs_step2( const int nthreads, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { super_BPDs[index] = FIND(super_BPDs, index) + 1; } } __global__ void merge_nearby_root_pixels( const int nthreads, const int height, const int width, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> roots, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; if (!roots[curr_h][curr_w]) return; for (int delta_h=0; delta_h<=min(3, height-1-curr_h); delta_h++) { for (int delta_w=-min(3, curr_w); delta_w<=min(3, width-1-curr_w); delta_w++) { int next_h = curr_h + delta_h; int next_w = curr_w + delta_w; if (roots[next_h][next_w]) { int next_index = next_h*width + next_w; UNION(super_BPDs, index, next_index); } } } } } __global__ void find_bnd_angle_diff( const int nthreads, const int height, const int width, const int num_superpixels, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> input_angles, int* super_BPDs, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> unique_super_BPDs_inverse, float* bnd_angle_diff, int* bnd_pair_nums) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; int curr_index = curr_h*width + curr_w; // right and bottom point int delta_h[2] = {0,1}; int delta_w[2] = {1,0}; for (int i=0; i<2; i++) { int next_h = curr_h + delta_h[i]; int next_w = curr_w + delta_w[i]; if (next_w >= width || next_h >= height) continue; int next_index = next_h*width + next_w; if (super_BPDs[curr_index] != super_BPDs[next_index]) { int curr_position = unique_super_BPDs_inverse[curr_h][curr_w]; int next_position = unique_super_BPDs_inverse[next_h][next_w]; int min_position = min(curr_position, next_position); int max_position = max(curr_position, next_position); atomicAdd(bnd_pair_nums + 
min_position*num_superpixels + max_position, 1); // forward 3 steps respectively, then calculate angle diff int steps = 3; while (steps--) { int curr_parent_h = parents[0][curr_h][curr_w]; int curr_parent_w = parents[1][curr_h][curr_w]; curr_h = curr_parent_h; curr_w = curr_parent_w; int next_parent_h = parents[0][next_h][next_w]; int next_parent_w = parents[1][next_h][next_w]; next_h = next_parent_h; next_w = next_parent_w; } float curr_angle = input_angles[curr_h][curr_w]; float next_angle = input_angles[next_h][next_w]; float angle_diff = abs(curr_angle - next_angle); angle_diff = min(angle_diff, 2*PI - angle_diff); atomicAdd(bnd_angle_diff + min_position*num_superpixels + max_position, angle_diff); } } } } __global__ void classify_edges( const int nthreads, const int num_superpixels, const int nums, const float S_o, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> bnd_angle_diff, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> bnd_pair_nums, torch::PackedTensorAccessor32<bool,2,torch::RestrictPtrTraits> select_matrix, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> edge_h, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> edge_w, int* replusive_matrix) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % num_superpixels; int curr_h = index / num_superpixels; if (bnd_pair_nums[curr_h][curr_w] == 0) return; float avg_angle_diff = bnd_angle_diff[curr_h][curr_w] / bnd_pair_nums[curr_h][curr_w]; bnd_angle_diff[curr_h][curr_w] = avg_angle_diff; if (avg_angle_diff > PI - S_o * PI / 180) { int inter_h = curr_w / 32; int inter_w = curr_w % 32; atomicOr(replusive_matrix + curr_h*nums + inter_h, REPLUSIVE_INIT >> inter_w); return; } select_matrix[curr_h][curr_w] = 1; edge_h[curr_h][curr_w] = curr_h; edge_w[curr_h][curr_w] = curr_w; } } __global__ void final_step( const int nthreads, torch::PackedTensorAccessor32<uint8_t,1,torch::RestrictPtrTraits> connect_marks, 
torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> edge_h, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> edge_w, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> unique_super_BPDs, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { if (connect_marks[index]) { int index_h = unique_super_BPDs[edge_h[index]] - 1; int index_w = unique_super_BPDs[edge_w[index]] - 1; UNION(super_BPDs, index_h, index_w); } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, \ torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> \ bpd_cuda(const torch::Tensor input_angles, const int height, const int width, const float theta_a, const float S_o) { const int kThreadsPerBlock = 1024; const int blocks = (height*width + kThreadsPerBlock - 1) / kThreadsPerBlock; torch::Tensor parents = torch::zeros({2, height, width}, torch::CUDA(torch::kInt32)); torch::Tensor roots = torch::zeros({height, width}, torch::CUDA(torch::kInt32)); // get parents and roots find_parents<<<blocks, kThreadsPerBlock>>>( height*width, height, width, theta_a, input_angles.packed_accessor32<float,2,torch::RestrictPtrTraits>(), parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), roots.packed_accessor32<int,2,torch::RestrictPtrTraits>() ); // get super-BPDs, index from 0 ~ height*width - 1, init label from 1 ~ height*width torch::Tensor super_BPDs = torch::arange(1, height*width + 1, torch::CUDA(torch::kInt32)); get_super_BPDs_step1<<<blocks, kThreadsPerBlock>>>( height*width, height, width, parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); get_super_BPDs_step2<<<blocks, kThreadsPerBlock>>>( height*width, super_BPDs.contiguous().data_ptr<int>() ); auto super_BPDs_before_dilation = super_BPDs.clone(); super_BPDs_before_dilation = super_BPDs_before_dilation.reshape({height, width}); // merge nearby root pixels merge_nearby_root_pixels<<<blocks, 
kThreadsPerBlock>>>( height*width, height, width, roots.packed_accessor32<int,2,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); get_super_BPDs_step2<<<blocks, kThreadsPerBlock>>>( height*width, super_BPDs.contiguous().data_ptr<int>() ); auto super_BPDs_after_dilation = super_BPDs.clone(); super_BPDs_after_dilation = super_BPDs_after_dilation.reshape({height, width}); // construct RAG auto unique_results = torch::_unique2(super_BPDs, true, true, true); auto unique_super_BPDs = std::get<0>(unique_results); auto unique_super_BPDs_inverse = std::get<1>(unique_results); unique_super_BPDs_inverse = unique_super_BPDs_inverse.to(torch::kInt32); unique_super_BPDs_inverse = unique_super_BPDs_inverse.reshape({height, width}); auto unique_super_BPDs_counts = std::get<2>(unique_results); unique_super_BPDs_counts = unique_super_BPDs_counts.to(torch::kInt32); int num_superpixels = unique_super_BPDs.numel(); torch::Tensor bnd_angle_diff = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kFloat32)); torch::Tensor bnd_pair_nums = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); find_bnd_angle_diff<<<blocks, kThreadsPerBlock>>>( height*width, height, width, num_superpixels, input_angles.packed_accessor32<float,2,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>(), parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), unique_super_BPDs_inverse.packed_accessor32<int,2,torch::RestrictPtrTraits>(), bnd_angle_diff.contiguous().data_ptr<float>(), bnd_pair_nums.contiguous().data_ptr<int>() ); // classify edges (replusive, large, small, tiny) torch::Tensor select_matrix = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kBool)); torch::Tensor edge_h = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); torch::Tensor edge_w = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); const int nums = (num_superpixels + 32 -1) / 32; 
torch::Tensor replusive_matrix = torch::zeros({num_superpixels, nums}, torch::CUDA(torch::kInt32)); const int blocks2 = (num_superpixels*num_superpixels + kThreadsPerBlock - 1) / kThreadsPerBlock; classify_edges<<<blocks2, kThreadsPerBlock>>>( num_superpixels*num_superpixels, num_superpixels, nums, S_o, bnd_angle_diff.packed_accessor32<float,2,torch::RestrictPtrTraits>(), bnd_pair_nums.packed_accessor32<int,2,torch::RestrictPtrTraits>(), select_matrix.packed_accessor32<bool,2,torch::RestrictPtrTraits>(), edge_h.packed_accessor32<int,2,torch::RestrictPtrTraits>(), edge_w.packed_accessor32<int,2,torch::RestrictPtrTraits>(), replusive_matrix.contiguous().data_ptr<int>() ); bnd_angle_diff = bnd_angle_diff.masked_select(select_matrix); edge_h = edge_h.masked_select(select_matrix); edge_w = edge_w.masked_select(select_matrix); // diff small to large, sim large to small auto sort_index = bnd_angle_diff.argsort(); auto sorted_bnd_angle_diff = bnd_angle_diff.index({sort_index}); auto sorted_edge_h = edge_h.index({sort_index}); auto sorted_edge_w = edge_w.index({sort_index}); // connect edges sorted_bnd_angle_diff = sorted_bnd_angle_diff.to(torch::kCPU); sorted_edge_h = sorted_edge_h.to(torch::kCPU); sorted_edge_w = sorted_edge_w.to(torch::kCPU); replusive_matrix = replusive_matrix.to(torch::kCPU); unique_super_BPDs_counts = unique_super_BPDs_counts.to(torch::kCPU); unique_super_BPDs = unique_super_BPDs.to(torch::kCPU); return std::make_tuple(unique_super_BPDs_counts, sorted_edge_h, \ sorted_edge_w, sorted_bnd_angle_diff, replusive_matrix, unique_super_BPDs, \ roots, super_BPDs_before_dilation, super_BPDs_after_dilation, super_BPDs); } torch::Tensor bpd_cuda_final_step(const int height, const int width, torch::Tensor connect_marks, torch::Tensor edge_h, \ torch::Tensor edge_w, torch::Tensor unique_super_BPDs, torch::Tensor super_BPDs) { connect_marks = connect_marks.to(torch::kCUDA); edge_h = edge_h.to(torch::kCUDA); edge_w = edge_w.to(torch::kCUDA); unique_super_BPDs = 
unique_super_BPDs.to(torch::kCUDA); super_BPDs = super_BPDs.to(torch::kCUDA); const int num_edges = edge_h.numel(); const int kThreadsPerBlock = 1024; const int blocks = (num_edges + kThreadsPerBlock - 1) / kThreadsPerBlock; final_step<<<blocks, kThreadsPerBlock>>>( num_edges, connect_marks.packed_accessor32<uint8_t,1,torch::RestrictPtrTraits>(), edge_h.packed_accessor32<int,1,torch::RestrictPtrTraits>(), edge_w.packed_accessor32<int,1,torch::RestrictPtrTraits>(), unique_super_BPDs.packed_accessor32<int,1,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); const int blocks2 = (height*width + kThreadsPerBlock - 1) / kThreadsPerBlock; get_super_BPDs_step2<<<blocks2, kThreadsPerBlock>>>( height*width, super_BPDs.contiguous().data_ptr<int>() ); super_BPDs = super_BPDs.reshape({height, width}); return super_BPDs; }
9aa9ffe2976f2e7a82eb9452c49655771b922ec7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void VecAdd(int* ret, int a, int b) { ret[threadIdx.x] = a + b + threadIdx.x; } int main(void) { int a = 10; int b = 100; int* ret = NULL; // results of addition hipMallocManaged(&ret, 1000 * sizeof(int)); hipLaunchKernelGGL(( VecAdd), dim3(1), dim3(1000) , 0, 0, ret, a, b); hipDeviceSynchronize(); for (int i = 0; i < 1000; i++) { printf("%4d: %d + %d + %4d = %5d\n", i, a, b, i, ret[i]); } hipFree(ret); return 0; }
9aa9ffe2976f2e7a82eb9452c49655771b922ec7.cu
#include <stdio.h> __global__ void VecAdd(int* ret, int a, int b) { ret[threadIdx.x] = a + b + threadIdx.x; } int main(void) { int a = 10; int b = 100; int* ret = NULL; // results of addition cudaMallocManaged(&ret, 1000 * sizeof(int)); VecAdd<<< 1, 1000 >>>(ret, a, b); cudaDeviceSynchronize(); for (int i = 0; i < 1000; i++) { printf("%4d: %d + %d + %4d = %5d\n", i, a, b, i, ret[i]); } cudaFree(ret); return 0; }
74a409c1538d5ada044b98b3185558c8b6c84fb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=128 --blockDim=128 #define MUL(a, b) __umul24(a, b) #define QRNG_RESOLUTION 31 #ifndef DOUBLE_PRECISION __device__ static __attribute__((always_inline)) float MoroInvCNDgpu(unsigned int x) { const float a1 = 2.50662823884f; const float a2 = -18.61500062529f; const float a3 = 41.39119773534f; const float a4 = -25.44106049637f; const float b1 = -8.4735109309f; const float b2 = 23.08336743743f; const float b3 = -21.06224101826f; const float b4 = 3.13082909833f; const float c1 = 0.337475482272615f; const float c2 = 0.976169019091719f; const float c3 = 0.160797971491821f; const float c4 = 2.76438810333863E-02f; const float c5 = 3.8405729373609E-03f; const float c6 = 3.951896511919E-04f; const float c7 = 3.21767881768E-05f; const float c8 = 2.888167364E-07f; const float c9 = 3.960315187E-07f; float z; bool negate = false; // Ensure the conversion to floating point will give a value in the // range (0,0.5] by restricting the input to the bottom half of the // input domain. We will later reflect the result if the input was // originally in the top half of the input domain if (x >= 0x80000000UL) { x = 0xffffffffUL - x; negate = true; } // x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff]) // Convert to floating point in (0,0.5] const float x1 = 1.0f / static_cast<float>(0xffffffffUL); const float x2 = x1 / 2.0f; float p1 = x * x1 + x2; // Convert to floating point in (-0.5,0] float p2 = p1 - 0.5f; // The input to the Moro inversion is p2 which is in the range // (-0.5,0]. This means that our output will be the negative side // of the bell curve (which we will reflect if "negate" is true). 
// Main body of the bell curve for |p| < 0.42 if (p2 > -0.42f) { z = p2 * p2; z = p2 * (((a4 * z + a3) * z + a2) * z + a1) / ((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0f); } // Special case (Chebychev) for tail else { z = __logf(-__logf(p1)); z = - (c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z * (c8 + z * c9)))))))); } // If the original input (x) was in the top half of the range, reflect // to get the positive side of the bell curve return negate ? -z : z; } #else __device__ static __attribute__((always_inline)) double MoroInvCNDgpu(unsigned int x) { const double a1 = 2.50662823884; const double a2 = -18.61500062529; const double a3 = 41.39119773534; const double a4 = -25.44106049637; const double b1 = -8.4735109309; const double b2 = 23.08336743743; const double b3 = -21.06224101826; const double b4 = 3.13082909833; const double c1 = 0.337475482272615; const double c2 = 0.976169019091719; const double c3 = 0.160797971491821; const double c4 = 2.76438810333863E-02; const double c5 = 3.8405729373609E-03; const double c6 = 3.951896511919E-04; const double c7 = 3.21767881768E-05; const double c8 = 2.888167364E-07; const double c9 = 3.960315187E-07; double z; bool negate = false; // Ensure the conversion to floating point will give a value in the // range (0,0.5] by restricting the input to the bottom half of the // input domain. We will later reflect the result if the input was // originally in the top half of the input domain if (x >= 0x80000000UL) { x = 0xffffffffUL - x; negate = true; } // x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff]) // Convert to floating point in (0,0.5] const double x1 = 1.0 / static_cast<double>(0xffffffffUL); const double x2 = x1 / 2.0; double p1 = x * x1 + x2; // Convert to floating point in (-0.5,0] double p2 = p1 - 0.5; // The input to the Moro inversion is p2 which is in the range // (-0.5,0]. 
This means that our output will be the negative side // of the bell curve (which we will reflect if "negate" is true). // Main body of the bell curve for |p| < 0.42 if (p2 > -0.42) { z = p2 * p2; z = p2 * (((a4 * z + a3) * z + a2) * z + a1) / ((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0); } // Special case (Chebychev) for tail else { z = log(-log(p1)); z = - (c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z * (c8 + z * c9)))))))); } // If the original input (x) was in the top half of the range, reflect // to get the positive side of the bell curve return negate ? -z : z; } #endif __global__ void inverseCNDKernel( float *d_Output, unsigned int *d_Input, unsigned int pathN ) { unsigned int distance = ((unsigned int)-1) / (pathN + 1); unsigned int tid = MUL(blockDim.x, blockIdx.x) + threadIdx.x; unsigned int threadN = MUL(blockDim.x, gridDim.x); //Transform input number sequence if it's supplied if (d_Input) { for (unsigned int pos = tid; pos < pathN; pos += threadN) { unsigned int d = d_Input[pos]; d_Output[pos] = (float)MoroInvCNDgpu(d); } } //Else generate input uniformly placed samples on the fly //and write to destination else { for (unsigned int pos = tid; pos < pathN; pos += threadN) { unsigned int d = (pos + 1) * distance; d_Output[pos] = (float)MoroInvCNDgpu(d); } } }
74a409c1538d5ada044b98b3185558c8b6c84fb3.cu
//pass //--gridDim=128 --blockDim=128 #define MUL(a, b) __umul24(a, b) #define QRNG_RESOLUTION 31 #ifndef DOUBLE_PRECISION __device__ static __attribute__((always_inline)) float MoroInvCNDgpu(unsigned int x) { const float a1 = 2.50662823884f; const float a2 = -18.61500062529f; const float a3 = 41.39119773534f; const float a4 = -25.44106049637f; const float b1 = -8.4735109309f; const float b2 = 23.08336743743f; const float b3 = -21.06224101826f; const float b4 = 3.13082909833f; const float c1 = 0.337475482272615f; const float c2 = 0.976169019091719f; const float c3 = 0.160797971491821f; const float c4 = 2.76438810333863E-02f; const float c5 = 3.8405729373609E-03f; const float c6 = 3.951896511919E-04f; const float c7 = 3.21767881768E-05f; const float c8 = 2.888167364E-07f; const float c9 = 3.960315187E-07f; float z; bool negate = false; // Ensure the conversion to floating point will give a value in the // range (0,0.5] by restricting the input to the bottom half of the // input domain. We will later reflect the result if the input was // originally in the top half of the input domain if (x >= 0x80000000UL) { x = 0xffffffffUL - x; negate = true; } // x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff]) // Convert to floating point in (0,0.5] const float x1 = 1.0f / static_cast<float>(0xffffffffUL); const float x2 = x1 / 2.0f; float p1 = x * x1 + x2; // Convert to floating point in (-0.5,0] float p2 = p1 - 0.5f; // The input to the Moro inversion is p2 which is in the range // (-0.5,0]. This means that our output will be the negative side // of the bell curve (which we will reflect if "negate" is true). 
// Main body of the bell curve for |p| < 0.42 if (p2 > -0.42f) { z = p2 * p2; z = p2 * (((a4 * z + a3) * z + a2) * z + a1) / ((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0f); } // Special case (Chebychev) for tail else { z = __logf(-__logf(p1)); z = - (c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z * (c8 + z * c9)))))))); } // If the original input (x) was in the top half of the range, reflect // to get the positive side of the bell curve return negate ? -z : z; } #else __device__ static __attribute__((always_inline)) double MoroInvCNDgpu(unsigned int x) { const double a1 = 2.50662823884; const double a2 = -18.61500062529; const double a3 = 41.39119773534; const double a4 = -25.44106049637; const double b1 = -8.4735109309; const double b2 = 23.08336743743; const double b3 = -21.06224101826; const double b4 = 3.13082909833; const double c1 = 0.337475482272615; const double c2 = 0.976169019091719; const double c3 = 0.160797971491821; const double c4 = 2.76438810333863E-02; const double c5 = 3.8405729373609E-03; const double c6 = 3.951896511919E-04; const double c7 = 3.21767881768E-05; const double c8 = 2.888167364E-07; const double c9 = 3.960315187E-07; double z; bool negate = false; // Ensure the conversion to floating point will give a value in the // range (0,0.5] by restricting the input to the bottom half of the // input domain. We will later reflect the result if the input was // originally in the top half of the input domain if (x >= 0x80000000UL) { x = 0xffffffffUL - x; negate = true; } // x is now in the range [0,0x80000000) (i.e. [0,0x7fffffff]) // Convert to floating point in (0,0.5] const double x1 = 1.0 / static_cast<double>(0xffffffffUL); const double x2 = x1 / 2.0; double p1 = x * x1 + x2; // Convert to floating point in (-0.5,0] double p2 = p1 - 0.5; // The input to the Moro inversion is p2 which is in the range // (-0.5,0]. 
This means that our output will be the negative side // of the bell curve (which we will reflect if "negate" is true). // Main body of the bell curve for |p| < 0.42 if (p2 > -0.42) { z = p2 * p2; z = p2 * (((a4 * z + a3) * z + a2) * z + a1) / ((((b4 * z + b3) * z + b2) * z + b1) * z + 1.0); } // Special case (Chebychev) for tail else { z = log(-log(p1)); z = - (c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z * (c8 + z * c9)))))))); } // If the original input (x) was in the top half of the range, reflect // to get the positive side of the bell curve return negate ? -z : z; } #endif __global__ void inverseCNDKernel( float *d_Output, unsigned int *d_Input, unsigned int pathN ) { unsigned int distance = ((unsigned int)-1) / (pathN + 1); unsigned int tid = MUL(blockDim.x, blockIdx.x) + threadIdx.x; unsigned int threadN = MUL(blockDim.x, gridDim.x); //Transform input number sequence if it's supplied if (d_Input) { for (unsigned int pos = tid; pos < pathN; pos += threadN) { unsigned int d = d_Input[pos]; d_Output[pos] = (float)MoroInvCNDgpu(d); } } //Else generate input uniformly placed samples on the fly //and write to destination else { for (unsigned int pos = tid; pos < pathN; pos += threadN) { unsigned int d = (pos + 1) * distance; d_Output[pos] = (float)MoroInvCNDgpu(d); } } }
329869a2d9233b5c48786942091520af1227db6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* program : Calling CUDA-kernel using ArrayFire v3.4 library. ArrayFire v3.4 handles the boilerplate of CUDA-API. date : 11th Sept 2017 (11/09/2017) coder : Muhammad Izham a.k.a Sugita institution : Universiti Malaysia Perlis (UniMAP) contact : sugita5019@gmail.com */ /* How to compile: $ nvcc -ccbin=g++ -std=c++11 -o filename filename.cu -lcuda -lcudart -lafcuda */ #include<arrayfire.h> #include<cuda.h> #include<cstdlib> #include<cstdio> #include "sys/time.h" #include "time.h" //play with the block size to find your best performance //hint: blockDim_x should be bigger than blockDim_y //hint: blockDim_x*blockDim_y should not exceed 1024 (gpu dependent) #define blockDim_x 128 #define blockDim_y 4 #define pi 4.0*atan(1.0) #define MIN2(x, y) ((x) < (y) ? (x) : (y)) using namespace af; //Heat equation kernel __global__ void cuda_diffusion2d_0 // ==================================================================== // // program : CUDA device code for 2-D diffusion equation // for 16 x 16 block and 16 x 16 thread per 1 block // // date : Nov 07, 2008 // programmer : Takayuki Aoki // place : Tokyo Institute of Technology // ( float *f, /* dependent variable */ float *fn, /* dependent variable */ int nx, /* grid number in the x-direction */ int ny, /* grid number in the x-direction */ float c0, /* coefficient no.0 */ float c1, /* coefficient no.1 */ float c2 /* coefficient no.2 */ ) // -------------------------------------------------------------------- { int j, jx, jy; float fcc, fce, fcw, fcs, fcn; jy = blockDim.y*blockIdx.y + threadIdx.y; jx = blockDim.x*blockIdx.x + threadIdx.x; j = nx*jy + jx; fcc = f[j]; if(jx == 0) fcw = fcc; else fcw = f[j - 1]; if(jx == nx - 1) fce = fcc; else fce = f[j+1]; if(jy == 0) fcs = fcc; else fcs = f[j-nx]; if(jy == ny - 1) fcn = fcc; else fcn = f[j+nx]; fn[j] = c0*(fce + fcw) + c1*(fcn + fcs) + c2*fcc; } float diffusion2d // 
==================================================================== // // purpose : 2-dimensional diffusion equation solved by FDM // // date : May 16, 2008 // programmer : Takayuki Aoki // place : Tokyo Institute of Technology // ( int nx, /* x-dimensional grid size */ int ny, /* y-dimensional grid size */ float *f, /* dependent variable */ float *fn, /* updated dependent variable */ float kappa, /* diffusion coefficient */ float dt, /* time step interval */ float dx, /* grid spacing in the x-direction */ float dy /* grid spacing in the y-direction */ ) // -------------------------------------------------------------------- { float c0 = kappa*dt/(dx*dx), c1 = kappa*dt/(dy*dy), c2 = 1.0 - 2.0*(c0 + c1); dim3 grid(nx/blockDim_x,ny/blockDim_y,1), threads(blockDim_x,blockDim_y,1);hipLaunchKernelGGL(( cuda_diffusion2d_0), dim3(grid), dim3(threads) , 0, 0, f,fn,nx,ny,c0,c1,c2); return (float)(nx*ny)*7.0; } int main() { struct timeval start,finish; double duration; float flops=0.0; int imax = 256; int jmax = 256; int nodes = imax*jmax; float dx = 1.0/((float)imax - 1); float dy = 1.0/((float)jmax - 1); //initiate from host float h_Temp[nodes]; for(int i=0; i<imax; ++i){ for(int j=0; j<jmax; ++j){ int id = i*jmax + j; h_Temp[id] = sin((float)i * dx * pi )*sin( (float)j * dy * pi); } } float kappa = 0.1; float dt = 0.20*MIN2(dx*dx,dy*dy)/kappa; int itermax = 20601; array Temp0(nodes, h_Temp); //initiate on gpu array Temp1(nodes, h_Temp); float *d_Temp0 = Temp0.device<float>(); float *d_Temp1 = Temp1.device<float>(); sync(); /* for output data during loop */ float *h_copy = new float[nodes]; FILE *fp0; fp0 = fopen("peak.dat","w"); gettimeofday(&start, NULL); // time loop for(int iter =0; iter < itermax; ++iter){ flops += diffusion2d(imax,jmax, d_Temp0, d_Temp1, kappa, dt, dx, dy); //( // int nx, /* x-dimensional grid size */ // int ny, /* y-dimensional grid size */ // float *f, /* dependent variable */ // float *fn, /* updated dependent variable */ // float kappa, /* 
diffusion coefficient */ // float dt, /* time step interval */ // float dx, /* grid spacing in the x-direction */ // float dy /* grid spacing in the y-direction */ //) // Use unlock to return back to ArrayFire stream. Otherwise just use the // d_Temp0 and d_Temp1 pointer. // Temp0.unlock(); // Temp1.unlock(); //output to file /* Temp1.host(h_copy); int id = (imax/2)*jmax + (jmax/2); fprintf(fp0,"%f\t %f\n", iter*dt, h_copy[id]);*/ if(iter > 0 && iter % 100 == 0){ printf("time(%d) = %f\n", iter, (float)iter*dt); } //Update pointer d_Temp0 = d_Temp1; } gettimeofday(&finish, NULL); duration = ((double)(finish.tv_sec-start.tv_sec)*1000000 + (double)(finish.tv_usec-start.tv_usec)) / 1000000; printf("Total operations : %f\n", flops); flops = flops/(duration*1.0e06); printf("Elapsed time:%lf secs\n", duration); printf("Time per loop: %lf secs\n", duration/(double)itermax); printf("Performance : %.2f MFlops\n", flops); fclose(fp0); delete [] h_copy; /*Transfering calculation data to host */ float* h_Temp0 = new float[nodes]; Temp0.host(h_Temp0); /*Write data to file. Tecplot ASCII format. */ FILE* fp; fp = fopen("cuda_diff.dat","w"); fprintf(fp,"variables = \"x\", \"y\", \"Temp\" \n"); fprintf(fp,"zone t=\"test\" \n"); fprintf(fp,"i=%d,\t j=%d\n", imax, jmax); for(int i=0; i<imax; ++i){ for(int j=0; j<jmax; ++j){ int id = i*jmax + j; float xg = (float)i*dx; float yg = (float)j*dy; fprintf(fp,"%f %f %f\n", xg, yg, h_Temp0[id]); } } fclose(fp); delete [] h_Temp0; }
329869a2d9233b5c48786942091520af1227db6f.cu
/* program : Calling CUDA-kernel using ArrayFire v3.4 library. ArrayFire v3.4 handles the boilerplate of CUDA-API. date : 11th Sept 2017 (11/09/2017) coder : Muhammad Izham a.k.a Sugita institution : Universiti Malaysia Perlis (UniMAP) contact : sugita5019@gmail.com */ /* How to compile: $ nvcc -ccbin=g++ -std=c++11 -o filename filename.cu -lcuda -lcudart -lafcuda */ #include<arrayfire.h> #include<cuda.h> #include<cstdlib> #include<cstdio> #include "sys/time.h" #include "time.h" //play with the block size to find your best performance //hint: blockDim_x should be bigger than blockDim_y //hint: blockDim_x*blockDim_y should not exceed 1024 (gpu dependent) #define blockDim_x 128 #define blockDim_y 4 #define pi 4.0*atan(1.0) #define MIN2(x, y) ((x) < (y) ? (x) : (y)) using namespace af; //Heat equation kernel __global__ void cuda_diffusion2d_0 // ==================================================================== // // program : CUDA device code for 2-D diffusion equation // for 16 x 16 block and 16 x 16 thread per 1 block // // date : Nov 07, 2008 // programmer : Takayuki Aoki // place : Tokyo Institute of Technology // ( float *f, /* dependent variable */ float *fn, /* dependent variable */ int nx, /* grid number in the x-direction */ int ny, /* grid number in the x-direction */ float c0, /* coefficient no.0 */ float c1, /* coefficient no.1 */ float c2 /* coefficient no.2 */ ) // -------------------------------------------------------------------- { int j, jx, jy; float fcc, fce, fcw, fcs, fcn; jy = blockDim.y*blockIdx.y + threadIdx.y; jx = blockDim.x*blockIdx.x + threadIdx.x; j = nx*jy + jx; fcc = f[j]; if(jx == 0) fcw = fcc; else fcw = f[j - 1]; if(jx == nx - 1) fce = fcc; else fce = f[j+1]; if(jy == 0) fcs = fcc; else fcs = f[j-nx]; if(jy == ny - 1) fcn = fcc; else fcn = f[j+nx]; fn[j] = c0*(fce + fcw) + c1*(fcn + fcs) + c2*fcc; } float diffusion2d // ==================================================================== // // purpose : 2-dimensional diffusion 
equation solved by FDM // // date : May 16, 2008 // programmer : Takayuki Aoki // place : Tokyo Institute of Technology // ( int nx, /* x-dimensional grid size */ int ny, /* y-dimensional grid size */ float *f, /* dependent variable */ float *fn, /* updated dependent variable */ float kappa, /* diffusion coefficient */ float dt, /* time step interval */ float dx, /* grid spacing in the x-direction */ float dy /* grid spacing in the y-direction */ ) // -------------------------------------------------------------------- { float c0 = kappa*dt/(dx*dx), c1 = kappa*dt/(dy*dy), c2 = 1.0 - 2.0*(c0 + c1); dim3 grid(nx/blockDim_x,ny/blockDim_y,1), threads(blockDim_x,blockDim_y,1); cuda_diffusion2d_0<<< grid, threads >>>(f,fn,nx,ny,c0,c1,c2); return (float)(nx*ny)*7.0; } int main() { struct timeval start,finish; double duration; float flops=0.0; int imax = 256; int jmax = 256; int nodes = imax*jmax; float dx = 1.0/((float)imax - 1); float dy = 1.0/((float)jmax - 1); //initiate from host float h_Temp[nodes]; for(int i=0; i<imax; ++i){ for(int j=0; j<jmax; ++j){ int id = i*jmax + j; h_Temp[id] = sin((float)i * dx * pi )*sin( (float)j * dy * pi); } } float kappa = 0.1; float dt = 0.20*MIN2(dx*dx,dy*dy)/kappa; int itermax = 20601; array Temp0(nodes, h_Temp); //initiate on gpu array Temp1(nodes, h_Temp); float *d_Temp0 = Temp0.device<float>(); float *d_Temp1 = Temp1.device<float>(); sync(); /* for output data during loop */ float *h_copy = new float[nodes]; FILE *fp0; fp0 = fopen("peak.dat","w"); gettimeofday(&start, NULL); // time loop for(int iter =0; iter < itermax; ++iter){ flops += diffusion2d(imax,jmax, d_Temp0, d_Temp1, kappa, dt, dx, dy); //( // int nx, /* x-dimensional grid size */ // int ny, /* y-dimensional grid size */ // float *f, /* dependent variable */ // float *fn, /* updated dependent variable */ // float kappa, /* diffusion coefficient */ // float dt, /* time step interval */ // float dx, /* grid spacing in the x-direction */ // float dy /* grid spacing in the 
y-direction */ //) // Use unlock to return back to ArrayFire stream. Otherwise just use the // d_Temp0 and d_Temp1 pointer. // Temp0.unlock(); // Temp1.unlock(); //output to file /* Temp1.host(h_copy); int id = (imax/2)*jmax + (jmax/2); fprintf(fp0,"%f\t %f\n", iter*dt, h_copy[id]);*/ if(iter > 0 && iter % 100 == 0){ printf("time(%d) = %f\n", iter, (float)iter*dt); } //Update pointer d_Temp0 = d_Temp1; } gettimeofday(&finish, NULL); duration = ((double)(finish.tv_sec-start.tv_sec)*1000000 + (double)(finish.tv_usec-start.tv_usec)) / 1000000; printf("Total operations : %f\n", flops); flops = flops/(duration*1.0e06); printf("Elapsed time:%lf secs\n", duration); printf("Time per loop: %lf secs\n", duration/(double)itermax); printf("Performance : %.2f MFlops\n", flops); fclose(fp0); delete [] h_copy; /*Transfering calculation data to host */ float* h_Temp0 = new float[nodes]; Temp0.host(h_Temp0); /*Write data to file. Tecplot ASCII format. */ FILE* fp; fp = fopen("cuda_diff.dat","w"); fprintf(fp,"variables = \"x\", \"y\", \"Temp\" \n"); fprintf(fp,"zone t=\"test\" \n"); fprintf(fp,"i=%d,\t j=%d\n", imax, jmax); for(int i=0; i<imax; ++i){ for(int j=0; j<jmax; ++j){ int id = i*jmax + j; float xg = (float)i*dx; float yg = (float)j*dy; fprintf(fp,"%f %f %f\n", xg, yg, h_Temp0[id]); } } fclose(fp); delete [] h_Temp0; }
5003e0c609e3bf9023a86c8cf6f894efa36100c1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <hip/hip_runtime.h> long long getCurrentTime() { struct timeval te; gettimeofday(&te, NULL); // get current time long long microseconds = te.tv_sec*1000000LL + te.tv_usec; return microseconds; } #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) inline void __cudaSafeCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } __inline__ __device__ int warpReduceSum(int val) { for (int offset = warpSize/2; offset > 0; offset /= 2) { #if TORCH_HIP_VERSION >= 9000 val += __shfl_down_sync(0xffffffff, val, offset); #else val += __shfl_down(val, offset); #endif } return val; } __inline__ __device__ int blockReduceSum(int val) { // Shared memory for 32 partial sums static __shared__ int shared[32]; int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; // Reduction within a warp val = warpReduceSum(val); // Write reduced value to shared memory if (lane == 0) shared[wid] = val; // Wait for all partial reductions __syncthreads(); // Read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? 
shared[lane] : 0; // Final reduce within first warp if (wid==0) val = warpReduceSum(val); return val; } __global__ void reduce(int *A, int *sum, int N) { int val = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { val += A[i]; } int valPerBlock = blockReduceSum(val); if (threadIdx.x == 0) { sum[blockIdx.x] = valPerBlock; } } int ReduceCPU(int *A, int N, double *cpuTime) { long long startTime = getCurrentTime(); int sum = 0; for (int i = 0; i < N; i++) { sum += A[i]; } *cpuTime = (double)(getCurrentTime() - startTime) / 1000000; return sum; } int ReduceGPU(int *A, int N, double *gpuOverallTime, double *gpuKernelTime) { long long startTime = getCurrentTime(); int threads = 512; int blocks = min((N + threads - 1) / threads, 1024); int *S = (int*)malloc(sizeof(int) * 1); int *dA; int *dSum; // Allocate memory on the device CudaSafeCall(hipMalloc(&dA, sizeof(int) * N)); CudaSafeCall(hipMalloc(&dSum, sizeof(int) * 1024)); // Copy the data from the host to the device CudaSafeCall(hipMemcpy(dA, A, N * sizeof (int), hipMemcpyHostToDevice)); CudaSafeCall(hipMemset(dSum, 0, sizeof (int) * 1024)); hipEvent_t start, stop; CudaSafeCall(hipEventCreate(&start)); CudaSafeCall(hipEventCreate(&stop)); // Launch the kernel CudaSafeCall(hipEventRecord(start)); hipLaunchKernelGGL(( reduce), dim3(blocks), dim3(threads), 0, 0, dA, dSum, N); hipLaunchKernelGGL(( reduce), dim3(1), dim3(1024), 0, 0, dSum, dSum, 1024); CudaSafeCall(hipEventRecord(stop)); CudaSafeCall(hipEventSynchronize(stop)); CudaSafeCall(hipDeviceSynchronize()); // Copy back the data from the host CudaSafeCall(hipMemcpy(S, dSum, 1 * sizeof (int), hipMemcpyDeviceToHost)); // Compute the performance numbers *gpuOverallTime = (double)(getCurrentTime() - startTime) / 1000000; float msec = 0; CudaSafeCall(hipEventElapsedTime(&msec, start, stop)); *gpuKernelTime = msec / 1000; // Cleanup CudaSafeCall(hipFree(dA)); CudaSafeCall(hipFree(dSum)); return *S; } int main(int argc, char **argv) 
{ if (argc != 2) { printf("Usage: ./reduce repeat\n"); exit(0); } int REPEATS = atoi(argv[1]); for (int repeat = 0; repeat < REPEATS; repeat++) { printf("[Iteration %d]\n", repeat); for (int N = 1024; N < 256 * 1024 * 1024; N = N * 2) { int* A = NULL; double cpuTime = 0.0; double gpuOverallTime = 0.0; double gpuKernelTime = 0.0; A = (int*)malloc(sizeof(int) * N); for (int i = 0; i < N; i++) { A[i] = i; } // CPU version int expected = ReduceCPU(A, N, &cpuTime); // GPU version int computed = ReduceGPU(A, N, &gpuOverallTime, &gpuKernelTime); if (computed == expected) { float GB = (float)(N * 4) / (1024 * 1024 * 1024); printf ("\tVERIFIED, %d, CPU (%lf sec) %lf GB/s, GPU (Overall: %lf sec) %lf GB/s, GPU (Kernel: %lf sec) %lf GB/s\n", 4*N, cpuTime, GB / cpuTime, gpuOverallTime, GB / gpuOverallTime, gpuKernelTime, GB / gpuKernelTime); } else { printf ("\tFAILED, %d, computed: %d, excepted %u\n", 4*N, computed, expected); } free(A); } } }
5003e0c609e3bf9023a86c8cf6f894efa36100c1.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <cuda.h> long long getCurrentTime() { struct timeval te; gettimeofday(&te, NULL); // get current time long long microseconds = te.tv_sec*1000000LL + te.tv_usec; return microseconds; } #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) inline void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } __inline__ __device__ int warpReduceSum(int val) { for (int offset = warpSize/2; offset > 0; offset /= 2) { #if CUDA_VERSION >= 9000 val += __shfl_down_sync(0xffffffff, val, offset); #else val += __shfl_down(val, offset); #endif } return val; } __inline__ __device__ int blockReduceSum(int val) { // Shared memory for 32 partial sums static __shared__ int shared[32]; int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; // Reduction within a warp val = warpReduceSum(val); // Write reduced value to shared memory if (lane == 0) shared[wid] = val; // Wait for all partial reductions __syncthreads(); // Read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? 
shared[lane] : 0; // Final reduce within first warp if (wid==0) val = warpReduceSum(val); return val; } __global__ void reduce(int *A, int *sum, int N) { int val = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { val += A[i]; } int valPerBlock = blockReduceSum(val); if (threadIdx.x == 0) { sum[blockIdx.x] = valPerBlock; } } int ReduceCPU(int *A, int N, double *cpuTime) { long long startTime = getCurrentTime(); int sum = 0; for (int i = 0; i < N; i++) { sum += A[i]; } *cpuTime = (double)(getCurrentTime() - startTime) / 1000000; return sum; } int ReduceGPU(int *A, int N, double *gpuOverallTime, double *gpuKernelTime) { long long startTime = getCurrentTime(); int threads = 512; int blocks = min((N + threads - 1) / threads, 1024); int *S = (int*)malloc(sizeof(int) * 1); int *dA; int *dSum; // Allocate memory on the device CudaSafeCall(cudaMalloc(&dA, sizeof(int) * N)); CudaSafeCall(cudaMalloc(&dSum, sizeof(int) * 1024)); // Copy the data from the host to the device CudaSafeCall(cudaMemcpy(dA, A, N * sizeof (int), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemset(dSum, 0, sizeof (int) * 1024)); cudaEvent_t start, stop; CudaSafeCall(cudaEventCreate(&start)); CudaSafeCall(cudaEventCreate(&stop)); // Launch the kernel CudaSafeCall(cudaEventRecord(start)); reduce<<<blocks, threads>>>(dA, dSum, N); reduce<<<1, 1024>>>(dSum, dSum, 1024); CudaSafeCall(cudaEventRecord(stop)); CudaSafeCall(cudaEventSynchronize(stop)); CudaSafeCall(cudaDeviceSynchronize()); // Copy back the data from the host CudaSafeCall(cudaMemcpy(S, dSum, 1 * sizeof (int), cudaMemcpyDeviceToHost)); // Compute the performance numbers *gpuOverallTime = (double)(getCurrentTime() - startTime) / 1000000; float msec = 0; CudaSafeCall(cudaEventElapsedTime(&msec, start, stop)); *gpuKernelTime = msec / 1000; // Cleanup CudaSafeCall(cudaFree(dA)); CudaSafeCall(cudaFree(dSum)); return *S; } int main(int argc, char **argv) { if (argc != 2) { printf("Usage: ./reduce repeat\n"); 
exit(0); } int REPEATS = atoi(argv[1]); for (int repeat = 0; repeat < REPEATS; repeat++) { printf("[Iteration %d]\n", repeat); for (int N = 1024; N < 256 * 1024 * 1024; N = N * 2) { int* A = NULL; double cpuTime = 0.0; double gpuOverallTime = 0.0; double gpuKernelTime = 0.0; A = (int*)malloc(sizeof(int) * N); for (int i = 0; i < N; i++) { A[i] = i; } // CPU version int expected = ReduceCPU(A, N, &cpuTime); // GPU version int computed = ReduceGPU(A, N, &gpuOverallTime, &gpuKernelTime); if (computed == expected) { float GB = (float)(N * 4) / (1024 * 1024 * 1024); printf ("\tVERIFIED, %d, CPU (%lf sec) %lf GB/s, GPU (Overall: %lf sec) %lf GB/s, GPU (Kernel: %lf sec) %lf GB/s\n", 4*N, cpuTime, GB / cpuTime, gpuOverallTime, GB / gpuOverallTime, gpuKernelTime, GB / gpuKernelTime); } else { printf ("\tFAILED, %d, computed: %d, excepted %u\n", 4*N, computed, expected); } free(A); } } }
166f6ba890a1aad0ed1c9cf45d1cfcf75087fb77.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <hip/hip_runtime.h> #include"device_launch_parameters.h" #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) #define N (55*1024) #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #undef assert #define assert(arg) #endif //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); return ; } } // This will output the proper error string when calling hipGetLastError inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, hipGetErrorString( err ) ); return ; } } // end of CUDA Helper Functions __global__ void VecAdd(float *a,float *b,float *c) { long long i=threadIdx.x+blockIdx.x*blockDim.x; while(i<N) { a[i]=b[i]+c[i]; //a[i]/=2.0; //a[i]/=3.0; i+=blockDim.x+gridDim.x; } //printf("blockdim:%d\n",blockDim.x); assert(blockDim.x); } __global__ void add(int a,int b ,int *c) { *c=a+b; } int main(){ hipSetDevice(0); hipDeviceSynchronize(); hipDeviceSynchronize(); float A[N],B[N],C[N]; for(long long i=0;i!=N;++i) { B[i]=i; C[i]=i; } float *dec_a,*dec_b,*dec_c; checkCudaErrors( hipMalloc((void**) &dec_a, sizeof(int)*N)); checkCudaErrors( hipMalloc((void**) &dec_b, sizeof(int)*N)); checkCudaErrors( hipMalloc((void**) &dec_c, sizeof(int)*N)); checkCudaErrors( 
hipMemcpy(dec_b,B,sizeof(int)*N,hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(dec_c,C,sizeof(int)*N,hipMemcpyHostToDevice)); hipLaunchKernelGGL(( VecAdd), dim3(256),dim3(128), 0, 0, dec_a,dec_b,dec_c); checkCudaErrors( hipMemcpy(A,dec_a,sizeof(int)*N,hipMemcpyDeviceToHost)); hipFree(dec_a); hipFree(dec_b); hipFree(dec_c); bool suc=true; for (long long i=0;i!=N;++i) { if (A[i]!=B[i]+C[i]) { suc=false; } } if (suc) { printf("we did it\n"); } else { printf("we fail\n"); } //matAdd<<<1,dimBlock>>>(A,B,C); int c; int *resultc; checkCudaErrors( hipMalloc((void**) &resultc, sizeof(int))); hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2,7,resultc); checkCudaErrors( hipMemcpy(&c,resultc,sizeof(int),hipMemcpyDeviceToHost)); printf("%d\n",c); hipFree(resultc); getchar(); hipDeviceReset(); return 0; }
166f6ba890a1aad0ed1c9cf45d1cfcf75087fb77.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <cuda_runtime.h> #include"device_launch_parameters.h" #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) #define N (55*1024) #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #undef assert #define assert(arg) #endif //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); return ; } } // This will output the proper error string when calling cudaGetLastError inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); return ; } } // end of CUDA Helper Functions __global__ void VecAdd(float *a,float *b,float *c) { long long i=threadIdx.x+blockIdx.x*blockDim.x; while(i<N) { a[i]=b[i]+c[i]; //a[i]/=2.0; //a[i]/=3.0; i+=blockDim.x+gridDim.x; } //printf("blockdim:%d\n",blockDim.x); assert(blockDim.x); } __global__ void add(int a,int b ,int *c) { *c=a+b; } int main(){ cudaSetDevice(0); cudaDeviceSynchronize(); cudaThreadSynchronize(); float A[N],B[N],C[N]; for(long long i=0;i!=N;++i) { B[i]=i; C[i]=i; } float *dec_a,*dec_b,*dec_c; checkCudaErrors( cudaMalloc((void**) &dec_a, sizeof(int)*N)); checkCudaErrors( cudaMalloc((void**) &dec_b, sizeof(int)*N)); checkCudaErrors( cudaMalloc((void**) &dec_c, sizeof(int)*N)); checkCudaErrors( cudaMemcpy(dec_b,B,sizeof(int)*N,cudaMemcpyHostToDevice)); 
checkCudaErrors( cudaMemcpy(dec_c,C,sizeof(int)*N,cudaMemcpyHostToDevice)); VecAdd<<<256,128>>>(dec_a,dec_b,dec_c); checkCudaErrors( cudaMemcpy(A,dec_a,sizeof(int)*N,cudaMemcpyDeviceToHost)); cudaFree(dec_a); cudaFree(dec_b); cudaFree(dec_c); bool suc=true; for (long long i=0;i!=N;++i) { if (A[i]!=B[i]+C[i]) { suc=false; } } if (suc) { printf("we did it\n"); } else { printf("we fail\n"); } //matAdd<<<1,dimBlock>>>(A,B,C); int c; int *resultc; checkCudaErrors( cudaMalloc((void**) &resultc, sizeof(int))); add<<<1,1>>>(2,7,resultc); checkCudaErrors( cudaMemcpy(&c,resultc,sizeof(int),cudaMemcpyDeviceToHost)); printf("%d\n",c); cudaFree(resultc); getchar(); cudaThreadExit(); return 0; }
89e0d9a16838545eeb51d51ed53a796ab4883a19.hip
// !!! This is a file automatically generated by hipify!!! #include "InstanceTablePlaneSweep.h" #include <thrust/device_vector.h> #include <thrust/sort.h> #include "../Common/MiningCommon.h" #include "../Entities/InstanceTable.h" #include "../Common/CommonOperations.h" #include <algorithm> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <thrust/execution_policy.h> namespace PlaneSweep { namespace InstanceTable { __global__ void countNeighbours( float* xCoords , float* yCoords , FeatureInstance* instances , int count , float radius , float radiusSquared , int warpsCount , UInt* resultNeighboursCount) { // btid int blockThreadId = threadIdx.x; //gid int globalId = computeLinearAddressFrom2D(); // wid int warpId = globalId / 32; // bwid int blockWarpId = blockThreadId / 32; // wtid int warpThreadId = threadIdx.x % 32; __shared__ volatile bool * flags; __shared__ volatile UInt * found; if (blockThreadId == 0) { flags = static_cast<bool*>(malloc((blockDim.x / 32) * sizeof(bool))); found = static_cast<UInt*>(malloc(blockDim.x * sizeof(unsigned int))); } __syncthreads(); //uint start= wid * ((inSize-1 ) / warpCount ) + max(0, - warpCount + wid + (inSize - 1) % warpCount ) + 1; int start = warpId * ((count - 1) / warpsCount) + max(0, -warpsCount + warpId + (count - 1) % warpsCount) + 1; //uint stop=(wid + 1) * ((inSize-1 ) / warpCount ) + max(0, -warpCount + (inSize- 1) % warpCount + wid + 1); int stop = (warpId + 1) * ((count - 1) / warpsCount) + max(0, -warpsCount + warpId + 1 + (count - 1) % warpsCount); if (warpId < warpsCount) { found[blockThreadId] = 0; for (UInt i = start; i <= stop; i++) { float px = xCoords[i]; float py = yCoords[i]; flags[blockWarpId] = false; for (int j = i - 32; j >= -32; j -= 32) { int localId = warpThreadId + j; if (localId >= 0) { if (instances[i].fields.featureId != instances[localId].fields.featureId) { float lx = xCoords[localId]; if ((px - lx) > radius) flags[blockWarpId] = true; float ly = yCoords[localId]; if 
((MiningCommon::distance(px, py, lx, ly) <= radiusSquared)) found[blockThreadId] += 1; } } if (flags[blockWarpId]) { break; } } } MiningCommon::intraWarpReduce(found + blockWarpId * 32); if (warpThreadId == 0) { resultNeighboursCount[warpId] = found[blockWarpId * 32 + 31]; } } __syncthreads(); if (blockThreadId == 0) { free(const_cast<bool*>(flags)); free(const_cast<UInt*>(found)); } } // -------------------------------------------------------------------------------------------------------------------------------------- __global__ void findNeighbours( float* xCoords , float* yCoords , FeatureInstance* instances , int count , float radius , float radiusSquared , int warpsCount , UInt *outStarts , FeatureInstance* out_a , FeatureInstance* out_b) { // btid int blockThreadId = threadIdx.x; //gid int globalId = computeLinearAddressFrom2D(); // wid int warpId = globalId / 32; // bwid int blockWarpId = blockThreadId / 32; // wtid int warpThreadId = threadIdx.x % 32; // const UInt underBuffId = blockWarpId * 64 + warpThreadId; // const int aboveBuffId = blockWarpId * 64 + 32 + warpThreadId; __shared__ volatile UInt* scanBuf; __shared__ volatile bool* flags; __shared__ volatile bool* found; //volatile __shared__ UInt* buffA; //volatile __shared__ UInt* buffB; __shared__ UInt* warpBuffPos; FeatureInstance temp_a; FeatureInstance temp_b; // UInt localStart = 0; if (blockThreadId == 0) { // check Allocating http://www.drdobbs.com/parallel/a-massively-parallel-stack-for-data-allo/240162018?pgno=1 // measure dynamic allocating in different warps scanBuf = static_cast<UInt*>(malloc(blockDim.x * sizeof(unsigned int))); flags = static_cast<bool*>(malloc((blockDim.x / 32) * sizeof(bool))); found = static_cast<bool*>(malloc(blockDim.x * sizeof(bool))); //buffA = static_cast<UInt*>(malloc(blockDim.x * sizeof(UInt))); //buffB = static_cast<UInt*>(malloc(blockDim.x * sizeof(UInt))); warpBuffPos = static_cast<UInt*>(malloc(blockDim.x / 32 * sizeof(UInt))); } __syncthreads(); if 
(warpThreadId == 0) warpBuffPos[warpThreadId] = 0; //uint start= wid * ((inSize-1 ) / warpCount ) + max(0, - warpCount + wid + (inSize - 1) % warpCount ) + 1; int start = warpId * ((count - 1) / warpsCount) + max(0, -warpsCount + warpId + (count - 1) % warpsCount) + 1; //uint stop=(wid + 1) * ((inSize-1 ) / warpCount ) + max(0, -warpCount + (inSize- 1) % warpCount + wid + 1); int stop = (warpId + 1) * ((count - 1) / warpsCount) + max(0, -warpsCount + (count - 1) % warpsCount + warpId + 1); if (warpId < warpsCount) { UInt outStart = outStarts[warpId]; for (UInt actualRootId = start; actualRootId <= stop; ++actualRootId) { float px = xCoords[actualRootId]; float py = yCoords[actualRootId]; flags[blockWarpId] = false; for (int j = actualRootId - 32; j >= -32; j -= 32) { int localId = warpThreadId + j; found[blockThreadId] = false; scanBuf[blockThreadId] = 0; if (localId >= 0) { if (instances[actualRootId].fields.featureId != instances[localId].fields.featureId) { float lx = xCoords[localId]; if ((px - lx) > radius) { flags[blockWarpId] = true; } float ly = yCoords[localId]; if ((MiningCommon::distance(px, py, lx, ly) <= radiusSquared)) { found[blockThreadId] = true; if (instances[actualRootId].field < instances[localId].field) { temp_a = instances[actualRootId]; temp_b = instances[localId]; } else { temp_a = instances[localId]; temp_b = instances[actualRootId]; } scanBuf[blockThreadId] = 1; } } } int lasteL = scanBuf[blockWarpId * 32 + 31]; MiningCommon::intraWarpScan<UInt>(scanBuf + blockWarpId * 32); __syncthreads(); //if (warpBuffPos[blockWarpId] + scanBuf) if (found[blockThreadId]) { int pos = scanBuf[blockThreadId] + outStart; out_a[pos] = temp_a; out_b[pos] = temp_b; } outStart += scanBuf[blockWarpId * 32 + 31] + lasteL; /* scanBuf[blockThreadId] = found[blockThreadId]; intraWarpScan(scanBuf); UInt oldLocalStart = localStart; if (found[blockThreadId]) { UInt index = blockWarpId * 64 + (localStart + scanBuf[blockThreadId]) % 64; buffA[index] = temp_a.field; 
buffB[index] = temp_b.field; } // ( // localstart // + last value from scanbuff for last thread in warp // + last value from found for last thread in warp // ) mod 64 localStart = (localStart + scanBuf[blockWarpId * 32 + 31] + found[blockWarpId * 32 + 31]) % 64; if (oldLocalStart < 32 && localStart >= 32) { out_a[outStart + warpThreadId].field = buffA[underBuffId]; out_b[outStart + warpThreadId].field = buffB[underBuffId]; outStart += 32; } else if (localStart < 32) { out_a[outStart + warpThreadId].field = buffA[aboveBuffId]; out_b[outStart + warpThreadId].field = buffB[aboveBuffId]; outStart += 32; } */ if (flags[blockWarpId]) { break; } } } /* if (localStart < 32 && warpThreadId < localStart) { out_a[outStart + warpThreadId].field = buffA[underBuffId]; out_b[outStart + warpThreadId].field = buffB[underBuffId]; } else if (localStart >= 32 && warpThreadId < localStart - 32) { out_a[outStart + warpThreadId].field = buffA[aboveBuffId]; out_b[outStart + warpThreadId].field = buffB[aboveBuffId]; } */ } __syncthreads(); if (blockThreadId == 0) { free(const_cast<UInt*>(scanBuf)); free(const_cast<bool*>(flags)); free(const_cast<bool*>(found)); //free(const_cast<UInt*>(buffA)); //free(const_cast<UInt*>(buffB)); } } // -------------------------------------------------------------------------------------------------------------------------------------- struct XYFeatureInstanceComparator { __host__ __device__ bool operator()( const thrust::tuple<float, float, const FeatureInstance>& o1 , const thrust::tuple<float, float, const FeatureInstance>& o2) { return o1.get<0>() < o2.get<0>(); } }; void SortByXAxis( thrust::device_vector<float>& xCoords , thrust::device_vector<float>& yCoords , thrust::device_vector<FeatureInstance>& instances ) { typedef thrust::device_ptr<FeatureInstance> FeatureInstanceIterator; typedef thrust::device_ptr<float> FloatIterator; typedef thrust::zip_iterator<thrust::tuple<FloatIterator, FloatIterator, FeatureInstanceIterator>> SortIter; SortIter 
begin(thrust::make_tuple(xCoords.begin().base(), yCoords.begin().base(), instances.begin().base())); SortIter end(thrust::make_tuple(xCoords.end().base(), yCoords.end().base(), instances.end().base())); thrust::sort(thrust::device, begin, end, XYFeatureInstanceComparator()); } // -------------------------------------------------------------------------------------------------------------------------------------- __host__ void PlaneSweep( thrust::device_vector<float>& xCoords , thrust::device_vector<float>& yCoords , thrust::device_vector<FeatureInstance>& instances , UInt count , float distanceTreshold , PlaneSweepTableInstanceResultPtr result) { UInt warpsCount = count; thrust::device_vector<UInt> neighboursCount(count); dim3 grid; SortByXAxis( xCoords , yCoords , instances ); findSmallest2D(warpsCount * 32, 256, grid.x, grid.y); hipLaunchKernelGGL(( countNeighbours) , dim3(grid), dim3(256) , 0, 0, thrust::raw_pointer_cast(xCoords.data()) , thrust::raw_pointer_cast(yCoords.data()) , thrust::raw_pointer_cast(instances.data()) , count , distanceTreshold , distanceTreshold * distanceTreshold , warpsCount , thrust::raw_pointer_cast(neighboursCount.data()) ); CUDA_CHECK_RETURN(hipDeviceSynchronize()); UInt totalPairsCount = neighboursCount[count - 1]; thrust::exclusive_scan(neighboursCount.begin(), neighboursCount.end(), neighboursCount.begin()); totalPairsCount += neighboursCount[count - 1]; typedef thrust::device_vector<FeatureInstance> InstancesDeviceVector; result->pairsA = InstancesDeviceVector(totalPairsCount); result->pairsB = InstancesDeviceVector(totalPairsCount); hipLaunchKernelGGL(( findNeighbours) , dim3(grid), dim3(256) , 0, 0, thrust::raw_pointer_cast(xCoords.data()) , thrust::raw_pointer_cast(yCoords.data()) , thrust::raw_pointer_cast(instances.data()) , count , distanceTreshold , distanceTreshold * distanceTreshold , warpsCount , neighboursCount.data().get() , result->pairsA.data().get() , result->pairsB.data().get() ); 
CUDA_CHECK_RETURN(hipDeviceSynchronize()); MiningCommon::zipSort( result->pairsA , result->pairsB ); CUDA_CHECK_RETURN(hipDeviceSynchronize()); } // -------------------------------------------------------------------------------------------------------------------------------------- } }
89e0d9a16838545eeb51d51ed53a796ab4883a19.cu
#include "InstanceTablePlaneSweep.h" #include <thrust/device_vector.h> #include <thrust/sort.h> #include "../Common/MiningCommon.h" #include "../Entities/InstanceTable.h" #include "../Common/CommonOperations.h" #include <algorithm> #include <cuda.h> #include <cuda_runtime_api.h> #include <thrust/execution_policy.h> namespace PlaneSweep { namespace InstanceTable { __global__ void countNeighbours( float* xCoords , float* yCoords , FeatureInstance* instances , int count , float radius , float radiusSquared , int warpsCount , UInt* resultNeighboursCount) { // btid int blockThreadId = threadIdx.x; //gid int globalId = computeLinearAddressFrom2D(); // wid int warpId = globalId / 32; // bwid int blockWarpId = blockThreadId / 32; // wtid int warpThreadId = threadIdx.x % 32; __shared__ volatile bool * flags; __shared__ volatile UInt * found; if (blockThreadId == 0) { flags = static_cast<bool*>(malloc((blockDim.x / 32) * sizeof(bool))); found = static_cast<UInt*>(malloc(blockDim.x * sizeof(unsigned int))); } __syncthreads(); //uint start= wid * ((inSize-1 ) / warpCount ) + max(0, - warpCount + wid + (inSize - 1) % warpCount ) + 1; int start = warpId * ((count - 1) / warpsCount) + max(0, -warpsCount + warpId + (count - 1) % warpsCount) + 1; //uint stop=(wid + 1) * ((inSize-1 ) / warpCount ) + max(0, -warpCount + (inSize- 1) % warpCount + wid + 1); int stop = (warpId + 1) * ((count - 1) / warpsCount) + max(0, -warpsCount + warpId + 1 + (count - 1) % warpsCount); if (warpId < warpsCount) { found[blockThreadId] = 0; for (UInt i = start; i <= stop; i++) { float px = xCoords[i]; float py = yCoords[i]; flags[blockWarpId] = false; for (int j = i - 32; j >= -32; j -= 32) { int localId = warpThreadId + j; if (localId >= 0) { if (instances[i].fields.featureId != instances[localId].fields.featureId) { float lx = xCoords[localId]; if ((px - lx) > radius) flags[blockWarpId] = true; float ly = yCoords[localId]; if ((MiningCommon::distance(px, py, lx, ly) <= radiusSquared)) 
found[blockThreadId] += 1; } } if (flags[blockWarpId]) { break; } } } MiningCommon::intraWarpReduce(found + blockWarpId * 32); if (warpThreadId == 0) { resultNeighboursCount[warpId] = found[blockWarpId * 32 + 31]; } } __syncthreads(); if (blockThreadId == 0) { free(const_cast<bool*>(flags)); free(const_cast<UInt*>(found)); } } // -------------------------------------------------------------------------------------------------------------------------------------- __global__ void findNeighbours( float* xCoords , float* yCoords , FeatureInstance* instances , int count , float radius , float radiusSquared , int warpsCount , UInt *outStarts , FeatureInstance* out_a , FeatureInstance* out_b) { // btid int blockThreadId = threadIdx.x; //gid int globalId = computeLinearAddressFrom2D(); // wid int warpId = globalId / 32; // bwid int blockWarpId = blockThreadId / 32; // wtid int warpThreadId = threadIdx.x % 32; // const UInt underBuffId = blockWarpId * 64 + warpThreadId; // const int aboveBuffId = blockWarpId * 64 + 32 + warpThreadId; __shared__ volatile UInt* scanBuf; __shared__ volatile bool* flags; __shared__ volatile bool* found; //volatile __shared__ UInt* buffA; //volatile __shared__ UInt* buffB; __shared__ UInt* warpBuffPos; FeatureInstance temp_a; FeatureInstance temp_b; // UInt localStart = 0; if (blockThreadId == 0) { // check Allocating http://www.drdobbs.com/parallel/a-massively-parallel-stack-for-data-allo/240162018?pgno=1 // measure dynamic allocating in different warps scanBuf = static_cast<UInt*>(malloc(blockDim.x * sizeof(unsigned int))); flags = static_cast<bool*>(malloc((blockDim.x / 32) * sizeof(bool))); found = static_cast<bool*>(malloc(blockDim.x * sizeof(bool))); //buffA = static_cast<UInt*>(malloc(blockDim.x * sizeof(UInt))); //buffB = static_cast<UInt*>(malloc(blockDim.x * sizeof(UInt))); warpBuffPos = static_cast<UInt*>(malloc(blockDim.x / 32 * sizeof(UInt))); } __syncthreads(); if (warpThreadId == 0) warpBuffPos[warpThreadId] = 0; //uint start= wid 
* ((inSize-1 ) / warpCount ) + max(0, - warpCount + wid + (inSize - 1) % warpCount ) + 1; int start = warpId * ((count - 1) / warpsCount) + max(0, -warpsCount + warpId + (count - 1) % warpsCount) + 1; //uint stop=(wid + 1) * ((inSize-1 ) / warpCount ) + max(0, -warpCount + (inSize- 1) % warpCount + wid + 1); int stop = (warpId + 1) * ((count - 1) / warpsCount) + max(0, -warpsCount + (count - 1) % warpsCount + warpId + 1); if (warpId < warpsCount) { UInt outStart = outStarts[warpId]; for (UInt actualRootId = start; actualRootId <= stop; ++actualRootId) { float px = xCoords[actualRootId]; float py = yCoords[actualRootId]; flags[blockWarpId] = false; for (int j = actualRootId - 32; j >= -32; j -= 32) { int localId = warpThreadId + j; found[blockThreadId] = false; scanBuf[blockThreadId] = 0; if (localId >= 0) { if (instances[actualRootId].fields.featureId != instances[localId].fields.featureId) { float lx = xCoords[localId]; if ((px - lx) > radius) { flags[blockWarpId] = true; } float ly = yCoords[localId]; if ((MiningCommon::distance(px, py, lx, ly) <= radiusSquared)) { found[blockThreadId] = true; if (instances[actualRootId].field < instances[localId].field) { temp_a = instances[actualRootId]; temp_b = instances[localId]; } else { temp_a = instances[localId]; temp_b = instances[actualRootId]; } scanBuf[blockThreadId] = 1; } } } int lasteL = scanBuf[blockWarpId * 32 + 31]; MiningCommon::intraWarpScan<UInt>(scanBuf + blockWarpId * 32); __syncthreads(); //if (warpBuffPos[blockWarpId] + scanBuf) if (found[blockThreadId]) { int pos = scanBuf[blockThreadId] + outStart; out_a[pos] = temp_a; out_b[pos] = temp_b; } outStart += scanBuf[blockWarpId * 32 + 31] + lasteL; /* scanBuf[blockThreadId] = found[blockThreadId]; intraWarpScan(scanBuf); UInt oldLocalStart = localStart; if (found[blockThreadId]) { UInt index = blockWarpId * 64 + (localStart + scanBuf[blockThreadId]) % 64; buffA[index] = temp_a.field; buffB[index] = temp_b.field; } // ( // localstart // + last value from 
scanbuff for last thread in warp // + last value from found for last thread in warp // ) mod 64 localStart = (localStart + scanBuf[blockWarpId * 32 + 31] + found[blockWarpId * 32 + 31]) % 64; if (oldLocalStart < 32 && localStart >= 32) { out_a[outStart + warpThreadId].field = buffA[underBuffId]; out_b[outStart + warpThreadId].field = buffB[underBuffId]; outStart += 32; } else if (localStart < 32) { out_a[outStart + warpThreadId].field = buffA[aboveBuffId]; out_b[outStart + warpThreadId].field = buffB[aboveBuffId]; outStart += 32; } */ if (flags[blockWarpId]) { break; } } } /* if (localStart < 32 && warpThreadId < localStart) { out_a[outStart + warpThreadId].field = buffA[underBuffId]; out_b[outStart + warpThreadId].field = buffB[underBuffId]; } else if (localStart >= 32 && warpThreadId < localStart - 32) { out_a[outStart + warpThreadId].field = buffA[aboveBuffId]; out_b[outStart + warpThreadId].field = buffB[aboveBuffId]; } */ } __syncthreads(); if (blockThreadId == 0) { free(const_cast<UInt*>(scanBuf)); free(const_cast<bool*>(flags)); free(const_cast<bool*>(found)); //free(const_cast<UInt*>(buffA)); //free(const_cast<UInt*>(buffB)); } } // -------------------------------------------------------------------------------------------------------------------------------------- struct XYFeatureInstanceComparator { __host__ __device__ bool operator()( const thrust::tuple<float, float, const FeatureInstance>& o1 , const thrust::tuple<float, float, const FeatureInstance>& o2) { return o1.get<0>() < o2.get<0>(); } }; void SortByXAxis( thrust::device_vector<float>& xCoords , thrust::device_vector<float>& yCoords , thrust::device_vector<FeatureInstance>& instances ) { typedef thrust::device_ptr<FeatureInstance> FeatureInstanceIterator; typedef thrust::device_ptr<float> FloatIterator; typedef thrust::zip_iterator<thrust::tuple<FloatIterator, FloatIterator, FeatureInstanceIterator>> SortIter; SortIter begin(thrust::make_tuple(xCoords.begin().base(), yCoords.begin().base(), 
instances.begin().base())); SortIter end(thrust::make_tuple(xCoords.end().base(), yCoords.end().base(), instances.end().base())); thrust::sort(thrust::device, begin, end, XYFeatureInstanceComparator()); } // -------------------------------------------------------------------------------------------------------------------------------------- __host__ void PlaneSweep( thrust::device_vector<float>& xCoords , thrust::device_vector<float>& yCoords , thrust::device_vector<FeatureInstance>& instances , UInt count , float distanceTreshold , PlaneSweepTableInstanceResultPtr result) { UInt warpsCount = count; thrust::device_vector<UInt> neighboursCount(count); dim3 grid; SortByXAxis( xCoords , yCoords , instances ); findSmallest2D(warpsCount * 32, 256, grid.x, grid.y); countNeighbours <<< grid, 256 >>> ( thrust::raw_pointer_cast(xCoords.data()) , thrust::raw_pointer_cast(yCoords.data()) , thrust::raw_pointer_cast(instances.data()) , count , distanceTreshold , distanceTreshold * distanceTreshold , warpsCount , thrust::raw_pointer_cast(neighboursCount.data()) ); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); UInt totalPairsCount = neighboursCount[count - 1]; thrust::exclusive_scan(neighboursCount.begin(), neighboursCount.end(), neighboursCount.begin()); totalPairsCount += neighboursCount[count - 1]; typedef thrust::device_vector<FeatureInstance> InstancesDeviceVector; result->pairsA = InstancesDeviceVector(totalPairsCount); result->pairsB = InstancesDeviceVector(totalPairsCount); findNeighbours <<< grid, 256 >>> ( thrust::raw_pointer_cast(xCoords.data()) , thrust::raw_pointer_cast(yCoords.data()) , thrust::raw_pointer_cast(instances.data()) , count , distanceTreshold , distanceTreshold * distanceTreshold , warpsCount , neighboursCount.data().get() , result->pairsA.data().get() , result->pairsB.data().get() ); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); MiningCommon::zipSort( result->pairsA , result->pairsB ); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); } // 
-------------------------------------------------------------------------------------------------------------------------------------- } }
f4276f56caba20a2e7e9d6f833cd2e8a608480a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by lshi on 17-9-25. // #include "deform_conv3d_cuda_kernel.h" #include "THH/THH.h" #include "TH/TH.h" #define THREAD_PRE_BLOCK 1024 #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif //--------------------------------------------------forward--------------------------------------------- template<typename DType> __device__ DType Tri_Linear(const DType *bottom_data, const int length, const int height, const int width, const double l, const int h, const int w) { //length and area const int data_width_1d = width; const int data_width_2d = height * width; //get the cube, the function int can not be used in template int l_low = int(l); int l_high = (l >= length - 1 || l <= 0) ? 
l_low : l_low + 1; //the corner, format is lhw DType c0 = bottom_data[l_low * data_width_2d + h * data_width_1d + w]; DType c1 = bottom_data[l_high * data_width_2d + h * data_width_1d + w]; //calculate the distance between the point and corner, using 1 to make sure using the low if equal DType l_length = l - l_low; DType h_length = 1 - l_length; //interpolation DType c = c0 * h_length + c1 * l_length; return c; } template<typename DType> __global__ void deformable_im2col_gpu_kernel( const int num_kernels, const DType *data_im, const DType *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, const int output_l, const int output_h, const int output_w, DType *data_col) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; index += blockDim.x * gridDim.x) { const int input_v = input_l * input_w * input_h; const int output_v = output_l * output_h * output_w; const int kernel_v = kernel_l * kernel_h * kernel_w; //L"H"W"CL'H'W' const int w_out = index % output_w; const int h_out = index / output_w % output_h; const int l_out = index / output_w / output_h % output_l; const int w_kernel = index / output_v % kernel_w; const int h_kernel = index / output_v / kernel_w % kernel_h; const int l_kernel = index / output_v / kernel_w / kernel_h % kernel_l; const int c_in = index / output_v / kernel_v % input_c; const int l_in = l_out * stride_l - pad_l; const int h_in = h_out * stride_h - pad_h; const int w_in = w_out * stride_w - pad_w; const int g_off = c_in / channel_per_deformable_group; // const int deform_group = input_c / channel_per_deformable_group; // printf("%d %d %d %d %d %d %d %d %d\n",threadIdx.x,b_in,l_out,h_out,w_out,c_in,l_kernel,h_kernel,w_kernel); //(CL'H'W')(L"H"W") DType *data_col_base_ptr = 
data_col + (c_in * kernel_v + l_kernel * kernel_h * kernel_w + h_kernel * kernel_w + w_kernel) * output_v + l_out * output_h * output_w + h_out * output_w + w_out; //CLHW const DType *data_in_base_ptr = data_im + c_in * input_v; //GL"H"W" const DType *data_offset_base_ptr = data_offset + g_off*output_v; const int offset = l_out*output_h*output_w+h_out*output_w+w_out; const DType l_in_after = l_in + l_kernel + data_offset_base_ptr[offset]; const DType h_in_after = h_in + h_kernel; const DType w_in_after = w_in + w_kernel; // printf("%d %f %f %f\n",threadIdx.x,l_in_after,h_in_after,w_in_after); DType val = 0; if (l_in_after > -1 && h_in_after > -1 && w_in_after > -1 && l_in_after < input_l && h_in_after < input_h && w_in_after < input_w) { //interpolation val = Tri_Linear(data_in_base_ptr, input_l, input_h, input_w, l_in_after, h_in_after, w_in_after); } *data_col_base_ptr = val; } } inline int get_cuda_blocks(const int num_kernel) { return (num_kernel + THREAD_PRE_BLOCK - 1) / THREAD_PRE_BLOCK; } void deformable_im2col(hipStream_t stream, const double *data_in, const double *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, double *data_col) { int num_kernels = output_l * output_h * output_w * input_c * kernel_l * kernel_h * kernel_w; deformable_im2col_gpu_kernel << < get_cuda_blocks(num_kernels), THREAD_PRE_BLOCK, 0, stream >> > ( num_kernels, data_in, data_offset, input_c, input_l, input_h, input_w, kernel_l, kernel_h, kernel_w, pad_l, pad_h, pad_w, stride_l, stride_h, stride_w, channel_per_deformable_group, output_l, output_h, output_w, data_col); } //---------------------------------------------backward to 
input--------------------------------------------------- template<typename DType> __global__ void deformable_col2im_input_gpu_kernel( const int num_kernels, const DType *data_col, const DType *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, DType *grad_im) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; index += blockDim.x * gridDim.x) { const int input_v = input_l * input_w * input_h; const int output_v = output_l * output_h * output_w; const int kernel_v = kernel_l * kernel_h * kernel_w; //L"H"W"CL'H'W' const int w_kernel = index % kernel_w; const int h_kernel = index / kernel_w % kernel_h; const int l_kernel = index / kernel_w / kernel_h % kernel_l; const int c_in = index / kernel_v % input_c; const int w_out = index / kernel_v / input_c % output_w; const int h_out = index / kernel_v / input_c / output_w % output_h; const int l_out = index / kernel_v / input_c / output_w / output_h % output_l; const int l_in = l_out * stride_l - pad_l; const int h_in = h_out * stride_h - pad_h; const int w_in = w_out * stride_w - pad_w; const int g_off = c_in / channel_per_deformable_group; // const int deform_group = input_c / channel_per_deformable_group; //CL'H'W' L"H"W" const DType *data_col_base_ptr = data_col + (c_in * kernel_v + l_kernel * kernel_h * kernel_w + h_kernel * kernel_w + w_kernel) * output_v + l_out * output_h * output_w + h_out * output_w + w_out; //GL"H"W" const DType *data_offset_base_ptr = data_offset + g_off*output_v; const int offset = l_out*output_h*output_w+h_out*output_w+w_out; //CLHW DType *grad_in_base_ptr = grad_im + c_in * input_v; // printf("%d %d %d %d %d %x\n", threadIdx.x,b_in,c_in,input_c, input_v, 
grad_in_base_ptr); const int data_width_1d = input_w; const int data_width_2d = input_h * input_w; const DType l_in_after = l_in + l_kernel + data_offset_base_ptr[offset]; const DType h_in_after = h_in + h_kernel; const DType w_in_after = w_in + w_kernel; // printf("%d %f %f %f\n", threadIdx.x,l_in_after,h_in_after,w_in_after); if (l_in_after > -1 && h_in_after > -1 && w_in_after > -1 && l_in_after < input_l && h_in_after < input_h && w_in_after < input_w) { //eight point around int l_low = int(l_in_after); int l_high = (l_in_after >= input_l - 1 || l_in_after <= 0) ? l_low : l_low + 1; int a0 = l_low * data_width_2d + h_in_after * data_width_1d + w_in_after; int a1 = l_high * data_width_2d + h_in_after * data_width_1d + w_in_after; DType l_length = l_in_after - l_low; DType h_length = 1 - l_length; //grad for input atomicAdd( grad_in_base_ptr + a0, h_length * (*data_col_base_ptr)); atomicAdd( grad_in_base_ptr + a1, l_length * (*data_col_base_ptr)); } } } //template<typename DType> void deformable_col2im_input(hipStream_t stream, const double *data_col, const double *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, double *grad_im) { const int num_kernels = output_l * output_h * output_w * input_c * kernel_l * kernel_h * kernel_w; deformable_col2im_input_gpu_kernel << < get_cuda_blocks(num_kernels), THREAD_PRE_BLOCK, 0, stream >> > ( num_kernels, data_col, data_offset, input_c, input_l, input_h, input_w, output_l, output_h, output_w, kernel_l, kernel_h, kernel_w, pad_l, pad_h, pad_w, stride_l, stride_h, stride_w, channel_per_deformable_group, grad_im ); } //--------------------------------------------------backward to 
offset--------------------------------------------- template<typename DType> __global__ void deformable_col2im_offset_gpu_kernel( const int num_kernels, const DType *data_col, const DType *data_im, const DType *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, DType *grad_off) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; index += blockDim.x * gridDim.x) { //GL"H"W" const int input_v = input_l * input_w * input_h; const int output_v = output_l * output_h * output_w; const int kernel_v = kernel_l * kernel_h * kernel_w; const int deform_group = input_c / channel_per_deformable_group; const int w_out = index % output_w; const int h_out = index / output_w % output_h; const int l_out = index / output_w / output_h % output_l; const int g_off = index / output_v % deform_group; const int l_in = l_out * stride_l - pad_l; const int h_in = h_out * stride_h - pad_h; const int w_in = w_out * stride_w - pad_w; //GL"H"W" int offset_base = g_off*output_v; int offset = l_out * output_h * output_w + h_out * output_w + w_out; const DType *data_offset_base_ptr = data_offset + offset_base; DType *grad_offset_base_ptr = grad_off + offset_base + offset; // printf("%d %f\n",threadIdx.x, *data_offset_base_ptr); DType val = 0; for (int i = 0; i < channel_per_deformable_group; ++i) for(int l_kernel =0; l_kernel<kernel_l; l_kernel++) for(int h_kernel =0; h_kernel<kernel_h; h_kernel++) for(int w_kernel =0; w_kernel<kernel_w; w_kernel++){ const int c_in = g_off * channel_per_deformable_group + i; //CL'H'W' L"H"W" const DType *data_col_base_ptr = data_col + (c_in * kernel_v + l_kernel * kernel_h * kernel_w + h_kernel * kernel_w + w_kernel) * output_v + l_out 
* output_h * output_w + h_out * output_w + w_out; //CLHW const DType *data_in_base_ptr = data_im + c_in * input_v; const int data_width_1d = input_w; const int data_width_2d = input_h * input_w; const DType l_in_after = l_in + l_kernel + data_offset_base_ptr[offset]; const DType h_in_after = h_in + h_kernel; const DType w_in_after = w_in + w_kernel; if (l_in_after > -1 && h_in_after > -1 && w_in_after > -1 && l_in_after < input_l && h_in_after < input_h && w_in_after < input_w) { int l_low = int(l_in_after); int l_high = (l_in_after >= input_l - 1 || l_in_after <= 0) ? l_low : l_low + 1; int a0 = l_low * data_width_2d + h_in_after * data_width_1d + w_in_after; int a1 = l_high * data_width_2d + h_in_after * data_width_1d + w_in_after; DType c0 = data_in_base_ptr[a0]; DType c1 = data_in_base_ptr[a1]; val += *data_col_base_ptr * (c1 - c0); } } *grad_offset_base_ptr = val; } } //template<typename DType> void deformable_col2im_offset(hipStream_t stream, const double *data_col, const double *data_im, const double *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, double *grad_offset) { const int num_kernels = (input_c / channel_per_deformable_group) * output_l * output_h * output_w; deformable_col2im_offset_gpu_kernel << < get_cuda_blocks(num_kernels), THREAD_PRE_BLOCK, 0, stream >> > ( num_kernels, data_col, data_im, data_offset, input_c, input_l, input_h, input_w, output_l, output_h, output_w, kernel_l, kernel_h, kernel_w, pad_l, pad_h, pad_w, stride_l, stride_h, stride_w, channel_per_deformable_group, grad_offset ); }
f4276f56caba20a2e7e9d6f833cd2e8a608480a0.cu
// // Created by lshi on 17-9-25. // #include "deform_conv3d_cuda_kernel.h" #include "THC/THC.h" #include "TH/TH.h" #define THREAD_PRE_BLOCK 1024 #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif //--------------------------------------------------forward--------------------------------------------- template<typename DType> __device__ DType Tri_Linear(const DType *bottom_data, const int length, const int height, const int width, const double l, const int h, const int w) { //length and area const int data_width_1d = width; const int data_width_2d = height * width; //get the cube, the function int can not be used in template int l_low = int(l); int l_high = (l >= length - 1 || l <= 0) ? 
l_low : l_low + 1; //the corner, format is lhw DType c0 = bottom_data[l_low * data_width_2d + h * data_width_1d + w]; DType c1 = bottom_data[l_high * data_width_2d + h * data_width_1d + w]; //calculate the distance between the point and corner, using 1 to make sure using the low if equal DType l_length = l - l_low; DType h_length = 1 - l_length; //interpolation DType c = c0 * h_length + c1 * l_length; return c; } template<typename DType> __global__ void deformable_im2col_gpu_kernel( const int num_kernels, const DType *data_im, const DType *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, const int output_l, const int output_h, const int output_w, DType *data_col) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; index += blockDim.x * gridDim.x) { const int input_v = input_l * input_w * input_h; const int output_v = output_l * output_h * output_w; const int kernel_v = kernel_l * kernel_h * kernel_w; //L"H"W"CL'H'W' const int w_out = index % output_w; const int h_out = index / output_w % output_h; const int l_out = index / output_w / output_h % output_l; const int w_kernel = index / output_v % kernel_w; const int h_kernel = index / output_v / kernel_w % kernel_h; const int l_kernel = index / output_v / kernel_w / kernel_h % kernel_l; const int c_in = index / output_v / kernel_v % input_c; const int l_in = l_out * stride_l - pad_l; const int h_in = h_out * stride_h - pad_h; const int w_in = w_out * stride_w - pad_w; const int g_off = c_in / channel_per_deformable_group; // const int deform_group = input_c / channel_per_deformable_group; // printf("%d %d %d %d %d %d %d %d %d\n",threadIdx.x,b_in,l_out,h_out,w_out,c_in,l_kernel,h_kernel,w_kernel); //(CL'H'W')(L"H"W") DType *data_col_base_ptr = 
data_col + (c_in * kernel_v + l_kernel * kernel_h * kernel_w + h_kernel * kernel_w + w_kernel) * output_v + l_out * output_h * output_w + h_out * output_w + w_out; //CLHW const DType *data_in_base_ptr = data_im + c_in * input_v; //GL"H"W" const DType *data_offset_base_ptr = data_offset + g_off*output_v; const int offset = l_out*output_h*output_w+h_out*output_w+w_out; const DType l_in_after = l_in + l_kernel + data_offset_base_ptr[offset]; const DType h_in_after = h_in + h_kernel; const DType w_in_after = w_in + w_kernel; // printf("%d %f %f %f\n",threadIdx.x,l_in_after,h_in_after,w_in_after); DType val = 0; if (l_in_after > -1 && h_in_after > -1 && w_in_after > -1 && l_in_after < input_l && h_in_after < input_h && w_in_after < input_w) { //interpolation val = Tri_Linear(data_in_base_ptr, input_l, input_h, input_w, l_in_after, h_in_after, w_in_after); } *data_col_base_ptr = val; } } inline int get_cuda_blocks(const int num_kernel) { return (num_kernel + THREAD_PRE_BLOCK - 1) / THREAD_PRE_BLOCK; } void deformable_im2col(cudaStream_t stream, const double *data_in, const double *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, double *data_col) { int num_kernels = output_l * output_h * output_w * input_c * kernel_l * kernel_h * kernel_w; deformable_im2col_gpu_kernel << < get_cuda_blocks(num_kernels), THREAD_PRE_BLOCK, 0, stream >> > ( num_kernels, data_in, data_offset, input_c, input_l, input_h, input_w, kernel_l, kernel_h, kernel_w, pad_l, pad_h, pad_w, stride_l, stride_h, stride_w, channel_per_deformable_group, output_l, output_h, output_w, data_col); } //---------------------------------------------backward to 
input--------------------------------------------------- template<typename DType> __global__ void deformable_col2im_input_gpu_kernel( const int num_kernels, const DType *data_col, const DType *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, DType *grad_im) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; index += blockDim.x * gridDim.x) { const int input_v = input_l * input_w * input_h; const int output_v = output_l * output_h * output_w; const int kernel_v = kernel_l * kernel_h * kernel_w; //L"H"W"CL'H'W' const int w_kernel = index % kernel_w; const int h_kernel = index / kernel_w % kernel_h; const int l_kernel = index / kernel_w / kernel_h % kernel_l; const int c_in = index / kernel_v % input_c; const int w_out = index / kernel_v / input_c % output_w; const int h_out = index / kernel_v / input_c / output_w % output_h; const int l_out = index / kernel_v / input_c / output_w / output_h % output_l; const int l_in = l_out * stride_l - pad_l; const int h_in = h_out * stride_h - pad_h; const int w_in = w_out * stride_w - pad_w; const int g_off = c_in / channel_per_deformable_group; // const int deform_group = input_c / channel_per_deformable_group; //CL'H'W' L"H"W" const DType *data_col_base_ptr = data_col + (c_in * kernel_v + l_kernel * kernel_h * kernel_w + h_kernel * kernel_w + w_kernel) * output_v + l_out * output_h * output_w + h_out * output_w + w_out; //GL"H"W" const DType *data_offset_base_ptr = data_offset + g_off*output_v; const int offset = l_out*output_h*output_w+h_out*output_w+w_out; //CLHW DType *grad_in_base_ptr = grad_im + c_in * input_v; // printf("%d %d %d %d %d %x\n", threadIdx.x,b_in,c_in,input_c, input_v, 
grad_in_base_ptr); const int data_width_1d = input_w; const int data_width_2d = input_h * input_w; const DType l_in_after = l_in + l_kernel + data_offset_base_ptr[offset]; const DType h_in_after = h_in + h_kernel; const DType w_in_after = w_in + w_kernel; // printf("%d %f %f %f\n", threadIdx.x,l_in_after,h_in_after,w_in_after); if (l_in_after > -1 && h_in_after > -1 && w_in_after > -1 && l_in_after < input_l && h_in_after < input_h && w_in_after < input_w) { //eight point around int l_low = int(l_in_after); int l_high = (l_in_after >= input_l - 1 || l_in_after <= 0) ? l_low : l_low + 1; int a0 = l_low * data_width_2d + h_in_after * data_width_1d + w_in_after; int a1 = l_high * data_width_2d + h_in_after * data_width_1d + w_in_after; DType l_length = l_in_after - l_low; DType h_length = 1 - l_length; //grad for input atomicAdd( grad_in_base_ptr + a0, h_length * (*data_col_base_ptr)); atomicAdd( grad_in_base_ptr + a1, l_length * (*data_col_base_ptr)); } } } //template<typename DType> void deformable_col2im_input(cudaStream_t stream, const double *data_col, const double *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, double *grad_im) { const int num_kernels = output_l * output_h * output_w * input_c * kernel_l * kernel_h * kernel_w; deformable_col2im_input_gpu_kernel << < get_cuda_blocks(num_kernels), THREAD_PRE_BLOCK, 0, stream >> > ( num_kernels, data_col, data_offset, input_c, input_l, input_h, input_w, output_l, output_h, output_w, kernel_l, kernel_h, kernel_w, pad_l, pad_h, pad_w, stride_l, stride_h, stride_w, channel_per_deformable_group, grad_im ); } //--------------------------------------------------backward to 
offset--------------------------------------------- template<typename DType> __global__ void deformable_col2im_offset_gpu_kernel( const int num_kernels, const DType *data_col, const DType *data_im, const DType *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, DType *grad_off) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; index += blockDim.x * gridDim.x) { //GL"H"W" const int input_v = input_l * input_w * input_h; const int output_v = output_l * output_h * output_w; const int kernel_v = kernel_l * kernel_h * kernel_w; const int deform_group = input_c / channel_per_deformable_group; const int w_out = index % output_w; const int h_out = index / output_w % output_h; const int l_out = index / output_w / output_h % output_l; const int g_off = index / output_v % deform_group; const int l_in = l_out * stride_l - pad_l; const int h_in = h_out * stride_h - pad_h; const int w_in = w_out * stride_w - pad_w; //GL"H"W" int offset_base = g_off*output_v; int offset = l_out * output_h * output_w + h_out * output_w + w_out; const DType *data_offset_base_ptr = data_offset + offset_base; DType *grad_offset_base_ptr = grad_off + offset_base + offset; // printf("%d %f\n",threadIdx.x, *data_offset_base_ptr); DType val = 0; for (int i = 0; i < channel_per_deformable_group; ++i) for(int l_kernel =0; l_kernel<kernel_l; l_kernel++) for(int h_kernel =0; h_kernel<kernel_h; h_kernel++) for(int w_kernel =0; w_kernel<kernel_w; w_kernel++){ const int c_in = g_off * channel_per_deformable_group + i; //CL'H'W' L"H"W" const DType *data_col_base_ptr = data_col + (c_in * kernel_v + l_kernel * kernel_h * kernel_w + h_kernel * kernel_w + w_kernel) * output_v + l_out 
* output_h * output_w + h_out * output_w + w_out; //CLHW const DType *data_in_base_ptr = data_im + c_in * input_v; const int data_width_1d = input_w; const int data_width_2d = input_h * input_w; const DType l_in_after = l_in + l_kernel + data_offset_base_ptr[offset]; const DType h_in_after = h_in + h_kernel; const DType w_in_after = w_in + w_kernel; if (l_in_after > -1 && h_in_after > -1 && w_in_after > -1 && l_in_after < input_l && h_in_after < input_h && w_in_after < input_w) { int l_low = int(l_in_after); int l_high = (l_in_after >= input_l - 1 || l_in_after <= 0) ? l_low : l_low + 1; int a0 = l_low * data_width_2d + h_in_after * data_width_1d + w_in_after; int a1 = l_high * data_width_2d + h_in_after * data_width_1d + w_in_after; DType c0 = data_in_base_ptr[a0]; DType c1 = data_in_base_ptr[a1]; val += *data_col_base_ptr * (c1 - c0); } } *grad_offset_base_ptr = val; } } //template<typename DType> void deformable_col2im_offset(cudaStream_t stream, const double *data_col, const double *data_im, const double *data_offset, const int input_c, const int input_l, const int input_h, const int input_w, const int output_l, const int output_h, const int output_w, const int kernel_l, const int kernel_h, const int kernel_w, const int pad_l, const int pad_h, const int pad_w, const int stride_l, const int stride_h, const int stride_w, const int channel_per_deformable_group, double *grad_offset) { const int num_kernels = (input_c / channel_per_deformable_group) * output_l * output_h * output_w; deformable_col2im_offset_gpu_kernel << < get_cuda_blocks(num_kernels), THREAD_PRE_BLOCK, 0, stream >> > ( num_kernels, data_col, data_im, data_offset, input_c, input_l, input_h, input_w, output_l, output_h, output_w, kernel_l, kernel_h, kernel_w, pad_l, pad_h, pad_w, stride_l, stride_h, stride_w, channel_per_deformable_group, grad_offset ); }
44c8f278ae283b852a25778546b020df52ee55d4.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <hipcub/hipcub.hpp> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic shape plugin requires TRT version greater than 6.0. 
#if IS_TRT_VERSION_GE(6000) template <typename T> EmbEltwiseLayernormPluginDynamicImpl< T>::~EmbEltwiseLayernormPluginDynamicImpl() { this->terminate(); } inline half fp32tofp16(float x) { return static_cast<half>(x); } template <typename T> void EmbEltwiseLayernormPluginDynamicImpl<T>::shareGPUData( const EmbEltwiseLayernormPluginDynamicImplBase *anthor) { auto *ptr = dynamic_cast<const EmbEltwiseLayernormPluginDynamicImpl<T> *>(anthor); if (!ptr->is_initialized_) { return; } embs_gpu_ = ptr->embs_gpu_; scale_gpu_ = ptr->scale_gpu_; bias_gpu_ = ptr->bias_gpu_; int input_num = embs_.size(); in_ptr_tensor_.Resize({input_num}); emb_ptr_tensor_.ShareDataWith(ptr->emb_ptr_tensor_); } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() { if (is_initialized_) { return 0; } embs_gpu_.resize(embs_.size()); for (int i = 0; i < embs_.size(); i++) { if (embs_[i]) { T *host_ptr; auto size = emb_sizes_[i]; if (std::is_same<T, half>::value) { host_ptr = new T[size]; std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16); } else { host_ptr = reinterpret_cast<T *>(embs_[i]); } hipMalloc(&embs_gpu_[i], sizeof(T) * size); hipMemcpy(embs_gpu_[i], host_ptr, size * sizeof(T), hipMemcpyHostToDevice); if (std::is_same<T, half>::value) { delete[] host_ptr; } } } if (bias_) { hipMalloc(&bias_gpu_, sizeof(float) * bias_size_); hipMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float), hipMemcpyHostToDevice); } if (scale_) { hipMalloc(&scale_gpu_, sizeof(float) * scale_size_); hipMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float), hipMemcpyHostToDevice); } int input_num = embs_.size(); in_ptr_tensor_.Resize({input_num}); emb_ptr_tensor_.Resize({input_num}); hipGetDevice(&device_id_); auto emb_ptr_gpu_d = emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); hipMemcpy(emb_ptr_gpu_d, embs_gpu_.data(), sizeof(uintptr_t) * input_num, hipMemcpyHostToDevice); is_initialized_ = true; return 0; } template <typename T> void 
EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() { for (int i = 0; i < embs_gpu_.size(); ++i) { if (embs_gpu_[i]) { hipFree(embs_gpu_[i]); embs_gpu_[i] = nullptr; } } if (bias_gpu_) { hipFree(bias_gpu_); bias_gpu_ = nullptr; } if (scale_gpu_) { hipFree(scale_gpu_); scale_gpu_ = nullptr; } } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) { auto id_dims = input_desc[0].dims; int batch = id_dims.d[0]; int seq_len = id_dims.d[1]; int input_num = embs_.size(); hipGetDevice(&device_id_); auto in_ptr_gpu_d = in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto emb_ptr_gpu_d = emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto new_input_ptr = reinterpret_cast<uintptr_t>(inputs[0]); if (old_input_ptr_ != new_input_ptr) { old_input_ptr_ = new_input_ptr; hipMemcpyAsync(in_ptr_gpu_d, reinterpret_cast<const void *>(inputs), sizeof(uintptr_t) * input_num, hipMemcpyHostToDevice, stream); } auto out_type = output_desc[0].type; if (std::is_same<T, float>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kFLOAT, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp32 input.")); } else if (std::is_same<T, half>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kHALF, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp16 input.")); } else { PADDLE_THROW(platform::errors::Fatal( "Unsupport data type, the out type of EmbEltwiseLayernorm should be " "float or half.")); } auto *output_d = reinterpret_cast<T *>(outputs[0]); operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d, scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d, eps_, input_num, stream); 
return hipGetLastError() != hipSuccess; } template class EmbEltwiseLayernormPluginDynamicImpl<float>; #ifdef TRT_PLUGIN_FP16_AVALIABLE template class EmbEltwiseLayernormPluginDynamicImpl<half>; #endif int EmbEltwiseLayernormPluginDynamic::initialize() { impl_->initialize(); return 0; } void EmbEltwiseLayernormPluginDynamic::terminate() { impl_->terminate(); } nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { // NOLINT PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); nvinfer1::DimsExprs ret; ret.nbDims = 3; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(hidden_size_); return ret; } bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); int all_nums = nb_inputs + nb_outputs; const nvinfer1::PluginTensorDesc &desc = in_out[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { return desc.type == nvinfer1::DataType::kINT32; } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos < all_nums - 1) { return 
desc.type == nvinfer1::DataType::kINT32 && desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1]; } if (pos == all_nums - 1) { if (with_fp16_ == false) { return desc.type == nvinfer1::DataType::kFLOAT; } else { return desc.type == nvinfer1::DataType::kHALF; } } return false; } nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); if (with_fp16_) return nvinfer1::DataType::kHALF; else return nvinfer1::DataType::kFLOAT; } int EmbEltwiseLayernormPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) { impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream); return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
44c8f278ae283b852a25778546b020df52ee55d4.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <cub/cub.cuh> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic shape plugin requires TRT version greater than 6.0. 
#if IS_TRT_VERSION_GE(6000) template <typename T> EmbEltwiseLayernormPluginDynamicImpl< T>::~EmbEltwiseLayernormPluginDynamicImpl() { this->terminate(); } inline half fp32tofp16(float x) { return static_cast<half>(x); } template <typename T> void EmbEltwiseLayernormPluginDynamicImpl<T>::shareGPUData( const EmbEltwiseLayernormPluginDynamicImplBase *anthor) { auto *ptr = dynamic_cast<const EmbEltwiseLayernormPluginDynamicImpl<T> *>(anthor); if (!ptr->is_initialized_) { return; } embs_gpu_ = ptr->embs_gpu_; scale_gpu_ = ptr->scale_gpu_; bias_gpu_ = ptr->bias_gpu_; int input_num = embs_.size(); in_ptr_tensor_.Resize({input_num}); emb_ptr_tensor_.ShareDataWith(ptr->emb_ptr_tensor_); } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() { if (is_initialized_) { return 0; } embs_gpu_.resize(embs_.size()); for (int i = 0; i < embs_.size(); i++) { if (embs_[i]) { T *host_ptr; auto size = emb_sizes_[i]; if (std::is_same<T, half>::value) { host_ptr = new T[size]; std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16); } else { host_ptr = reinterpret_cast<T *>(embs_[i]); } cudaMalloc(&embs_gpu_[i], sizeof(T) * size); cudaMemcpy(embs_gpu_[i], host_ptr, size * sizeof(T), cudaMemcpyHostToDevice); if (std::is_same<T, half>::value) { delete[] host_ptr; } } } if (bias_) { cudaMalloc(&bias_gpu_, sizeof(float) * bias_size_); cudaMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float), cudaMemcpyHostToDevice); } if (scale_) { cudaMalloc(&scale_gpu_, sizeof(float) * scale_size_); cudaMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float), cudaMemcpyHostToDevice); } int input_num = embs_.size(); in_ptr_tensor_.Resize({input_num}); emb_ptr_tensor_.Resize({input_num}); cudaGetDevice(&device_id_); auto emb_ptr_gpu_d = emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); cudaMemcpy(emb_ptr_gpu_d, embs_gpu_.data(), sizeof(uintptr_t) * input_num, cudaMemcpyHostToDevice); is_initialized_ = true; return 0; } template <typename T> void 
EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() { for (int i = 0; i < embs_gpu_.size(); ++i) { if (embs_gpu_[i]) { cudaFree(embs_gpu_[i]); embs_gpu_[i] = nullptr; } } if (bias_gpu_) { cudaFree(bias_gpu_); bias_gpu_ = nullptr; } if (scale_gpu_) { cudaFree(scale_gpu_); scale_gpu_ = nullptr; } } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) { auto id_dims = input_desc[0].dims; int batch = id_dims.d[0]; int seq_len = id_dims.d[1]; int input_num = embs_.size(); cudaGetDevice(&device_id_); auto in_ptr_gpu_d = in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto emb_ptr_gpu_d = emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto new_input_ptr = reinterpret_cast<uintptr_t>(inputs[0]); if (old_input_ptr_ != new_input_ptr) { old_input_ptr_ = new_input_ptr; cudaMemcpyAsync(in_ptr_gpu_d, reinterpret_cast<const void *>(inputs), sizeof(uintptr_t) * input_num, cudaMemcpyHostToDevice, stream); } auto out_type = output_desc[0].type; if (std::is_same<T, float>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kFLOAT, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp32 input.")); } else if (std::is_same<T, half>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kHALF, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp16 input.")); } else { PADDLE_THROW(platform::errors::Fatal( "Unsupport data type, the out type of EmbEltwiseLayernorm should be " "float or half.")); } auto *output_d = reinterpret_cast<T *>(outputs[0]); operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d, scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d, eps_, input_num, 
stream); return cudaGetLastError() != cudaSuccess; } template class EmbEltwiseLayernormPluginDynamicImpl<float>; #ifdef TRT_PLUGIN_FP16_AVALIABLE template class EmbEltwiseLayernormPluginDynamicImpl<half>; #endif int EmbEltwiseLayernormPluginDynamic::initialize() { impl_->initialize(); return 0; } void EmbEltwiseLayernormPluginDynamic::terminate() { impl_->terminate(); } nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { // NOLINT PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); nvinfer1::DimsExprs ret; ret.nbDims = 3; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(hidden_size_); return ret; } bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); int all_nums = nb_inputs + nb_outputs; const nvinfer1::PluginTensorDesc &desc = in_out[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { return desc.type == nvinfer1::DataType::kINT32; } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos < all_nums - 
1) { return desc.type == nvinfer1::DataType::kINT32 && desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1]; } if (pos == all_nums - 1) { if (with_fp16_ == false) { return desc.type == nvinfer1::DataType::kFLOAT; } else { return desc.type == nvinfer1::DataType::kHALF; } } return false; } nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); if (with_fp16_) return nvinfer1::DataType::kHALF; else return nvinfer1::DataType::kFLOAT; } int EmbEltwiseLayernormPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) { impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream); return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
e5ef23de6e473f3518738837461846d5ec232dce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // device utility to calculate target evidence __device__ void device_cal_evidence(network_in_device nw_device, int node_id, int t){ sparse_csr_weighted& csr_info = nw_device.csr_info; network_info& nw_info = nw_device.nw_info; simulation_single& sim_ptr = nw_device.sim_ptr; const n_nodes num_nodes = *csr_info.number_of_nodes; double& confidence = csr_info.confidence[node_id]; const double& p_threshold = *nw_info.p_threshold; const double& n_threshold = *nw_info.n_threshold; double *current_evidence = sim_ptr.evidence + t * num_nodes + node_id; int *p_activated = sim_ptr.activated_positive + t * num_nodes + node_id; int *n_activated = sim_ptr.activated_negative + t * num_nodes + node_id; int *total_activated_p = sim_ptr.total_activated_positive + node_id; int *total_activated_n = sim_ptr.total_activated_negative + node_id; if(t == 0){ if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_P){ *current_evidence = 1; *p_activated = 1; *n_activated = 0; *total_activated_p = 1; *total_activated_n = 0; } else if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_N){ *current_evidence = -1; *p_activated = 0; *n_activated = 1; *total_activated_p = 0; *total_activated_n = 1; } else if(nw_info.nodes_types[node_id] == NODE_TYPE_REGULAR){ *current_evidence = 0; *p_activated = 0; *n_activated = 0; *total_activated_p = 0; *total_activated_n = 0; } return; } double *prev_evidence = sim_ptr.evidence + (t - 1) * num_nodes + node_id; if(nw_info.nodes_types[node_id] != NODE_TYPE_REGULAR){ *current_evidence = *prev_evidence; if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_N) (*total_activated_n)++; if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_P) (*total_activated_p)++; return; } (*current_evidence) = (*prev_evidence) * confidence; int n_linked_nodes = csr_info.row_ptr[node_id + 1] - csr_info.row_ptr[node_id]; int node_ind; for(int node = 0; node < n_linked_nodes; node++){ node_ind = 
csr_info.col_index[csr_info.row_ptr[node_id] + node]; if( *(sim_ptr.activated_positive + (t - 1) * num_nodes + node_ind) || *(sim_ptr.activated_negative + (t - 1) * num_nodes + node_ind) ){ *current_evidence += (csr_info.influence[csr_info.row_ptr[node_id] + node]) * (*(sim_ptr.evidence + (t - 1) * num_nodes + node_ind)); } // only activated nodes can send evidence } if((*current_evidence) > p_threshold){ *p_activated = 1; *n_activated = 0; (*total_activated_p) ++; } else if( (*current_evidence) < n_threshold){ *p_activated = 0; *n_activated = 1; (*total_activated_n) ++; }else{ *p_activated = 0; *n_activated = 0; } return; } __global__ void device_cal_evidence_global(network_in_device nw_device, int t){ // less than 1024 nodes case const n_nodes num_nodes = *nw_device.csr_info.number_of_nodes; int node_id = threadIdx.x; if(node_id < num_nodes){ device_cal_evidence(nw_device, node_id, t); } } simulation_single device_cal_evidence_host(const sparse_csr_weighted &csr_info, const network_info &h_nw_info){ // we need number of threads equal to the nmber of nodes network_in_device nw_device = cp_to_device(csr_info, h_nw_info); const int t_length = *h_nw_info.time_length; const int n_threads = 1024; for(int t = 0; t < t_length; t++){ // for less than 1024 nodes hipLaunchKernelGGL(( device_cal_evidence_global), dim3(1), dim3(n_threads), 0, 0, nw_device, t); hipDeviceSynchronize(); } return cp_to_host(nw_device); }
e5ef23de6e473f3518738837461846d5ec232dce.cu
// device utility to calculate target evidence __device__ void device_cal_evidence(network_in_device nw_device, int node_id, int t){ sparse_csr_weighted& csr_info = nw_device.csr_info; network_info& nw_info = nw_device.nw_info; simulation_single& sim_ptr = nw_device.sim_ptr; const n_nodes num_nodes = *csr_info.number_of_nodes; double& confidence = csr_info.confidence[node_id]; const double& p_threshold = *nw_info.p_threshold; const double& n_threshold = *nw_info.n_threshold; double *current_evidence = sim_ptr.evidence + t * num_nodes + node_id; int *p_activated = sim_ptr.activated_positive + t * num_nodes + node_id; int *n_activated = sim_ptr.activated_negative + t * num_nodes + node_id; int *total_activated_p = sim_ptr.total_activated_positive + node_id; int *total_activated_n = sim_ptr.total_activated_negative + node_id; if(t == 0){ if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_P){ *current_evidence = 1; *p_activated = 1; *n_activated = 0; *total_activated_p = 1; *total_activated_n = 0; } else if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_N){ *current_evidence = -1; *p_activated = 0; *n_activated = 1; *total_activated_p = 0; *total_activated_n = 1; } else if(nw_info.nodes_types[node_id] == NODE_TYPE_REGULAR){ *current_evidence = 0; *p_activated = 0; *n_activated = 0; *total_activated_p = 0; *total_activated_n = 0; } return; } double *prev_evidence = sim_ptr.evidence + (t - 1) * num_nodes + node_id; if(nw_info.nodes_types[node_id] != NODE_TYPE_REGULAR){ *current_evidence = *prev_evidence; if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_N) (*total_activated_n)++; if(nw_info.nodes_types[node_id] == NODE_TYPE_STUBBORN_P) (*total_activated_p)++; return; } (*current_evidence) = (*prev_evidence) * confidence; int n_linked_nodes = csr_info.row_ptr[node_id + 1] - csr_info.row_ptr[node_id]; int node_ind; for(int node = 0; node < n_linked_nodes; node++){ node_ind = csr_info.col_index[csr_info.row_ptr[node_id] + node]; if( *(sim_ptr.activated_positive + 
(t - 1) * num_nodes + node_ind) || *(sim_ptr.activated_negative + (t - 1) * num_nodes + node_ind) ){ *current_evidence += (csr_info.influence[csr_info.row_ptr[node_id] + node]) * (*(sim_ptr.evidence + (t - 1) * num_nodes + node_ind)); } // only activated nodes can send evidence } if((*current_evidence) > p_threshold){ *p_activated = 1; *n_activated = 0; (*total_activated_p) ++; } else if( (*current_evidence) < n_threshold){ *p_activated = 0; *n_activated = 1; (*total_activated_n) ++; }else{ *p_activated = 0; *n_activated = 0; } return; } __global__ void device_cal_evidence_global(network_in_device nw_device, int t){ // less than 1024 nodes case const n_nodes num_nodes = *nw_device.csr_info.number_of_nodes; int node_id = threadIdx.x; if(node_id < num_nodes){ device_cal_evidence(nw_device, node_id, t); } } simulation_single device_cal_evidence_host(const sparse_csr_weighted &csr_info, const network_info &h_nw_info){ // we need number of threads equal to the nmber of nodes network_in_device nw_device = cp_to_device(csr_info, h_nw_info); const int t_length = *h_nw_info.time_length; const int n_threads = 1024; for(int t = 0; t < t_length; t++){ // for less than 1024 nodes device_cal_evidence_global<<<1, n_threads>>>(nw_device, t); cudaDeviceSynchronize(); } return cp_to_host(nw_device); }
e687b75c15b3f48c38080c6f927c7716786830e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2018 The JmcAuto Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include "network.h" namespace jmc_auto { namespace perception { __host__ __device__ float sigmoid_gpu(float x) { return 1.0 / (1.0 + exp(-x)); } __global__ void get_object_kernel(int n, const float *loc_data, const float *obj_data, const float *cls_data, const float *ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) { int box_block = s_box_block_size; int idx = i; int c = idx % num_anchors; idx = idx / num_anchors; int w = idx % width; idx = idx / width; int h = idx; int offset_loc = ((h * width + w) * num_anchors + c) * 4; int offset_obj = (h * width + w) * num_anchors + c; int offset_cls = ((h * width + w) * num_anchors + c) * num_classes; float scale = obj_data[offset_obj]; // printf("%d %d %d %d %d (%d %d %d)| ",i,c,w,h,offset_loc,num_anchors,width,height); float cx = (w + 
sigmoid_gpu(loc_data[offset_loc + 0])) / width; float cy = (h + sigmoid_gpu(loc_data[offset_loc + 1])) / height; float hw = exp(loc_data[offset_loc + 2])* anchor_data[2 * c] / width * 0.5; float hh = exp(loc_data[offset_loc + 3]) * anchor_data[2 * c + 1] / height * 0.5; for (int k = 0; k < num_classes; ++k) { float prob = (cls_data[offset_cls + k] * scale > confidence_threshold ? cls_data[offset_cls + k] * scale : 0); // printf("%f %f | ",prob,cls_data[offset_cls + k] * scale); res_cls_data[k*width*height*num_anchors+i]=prob; } res_box_data[i * box_block + 0] = cx - hw; res_box_data[i * box_block + 1] = cy - hh; res_box_data[i * box_block + 2] = cx + hw; res_box_data[i * box_block + 3] = cy + hh; if (with_ori) { int offset_ori = ((h * width + w) * num_anchors + c) * 2; res_box_data[i*box_block+4]=atan2(ori_data[offset_ori+1],ori_data[offset_ori]); } if (with_dim) { int offset_dim = ((h * width + w) * num_anchors + c) * 3; res_box_data[i*box_block+5]=dim_data[offset_dim + 0]; res_box_data[i*box_block+6]=dim_data[offset_dim + 1]; res_box_data[i*box_block+7]=dim_data[offset_dim + 2]; } if (with_lof) { int offset_lof = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 8; auto &&src_ptr = lof_data + offset_lof; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } if (with_lor) { int offset_lor = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 12; auto &&src_ptr = lor_data + offset_lor; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } } } void GetObjectsGPU(int n, const float *loc_data, 
const float *obj_data, const float *cls_data, const float *ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { const int thread_size = 512; int block_size = (n + thread_size -1) / thread_size; { get_object_kernel << < block_size, thread_size >> > (n, loc_data, obj_data, cls_data, ori_data, dim_data, lof_data, lor_data, anchor_data, width, height, num_anchors, num_classes, confidence_threshold, with_ori, with_dim, with_lof, with_lor, res_box_data, res_cls_data, s_box_block_size); } hipDeviceSynchronize(); } } // namespace jmc_auto } // namespace perception
e687b75c15b3f48c38080c6f927c7716786830e0.cu
/****************************************************************************** * Copyright 2018 The JmcAuto Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include "network.h" namespace jmc_auto { namespace perception { __host__ __device__ float sigmoid_gpu(float x) { return 1.0 / (1.0 + exp(-x)); } __global__ void get_object_kernel(int n, const float *loc_data, const float *obj_data, const float *cls_data, const float *ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) { int box_block = s_box_block_size; int idx = i; int c = idx % num_anchors; idx = idx / num_anchors; int w = idx % width; idx = idx / width; int h = idx; int offset_loc = ((h * width + w) * num_anchors + c) * 4; int offset_obj = (h * width + w) * num_anchors + c; int offset_cls = ((h * width + w) * num_anchors + c) * num_classes; float scale = obj_data[offset_obj]; // printf("%d %d %d %d %d (%d %d %d)| ",i,c,w,h,offset_loc,num_anchors,width,height); float cx = (w + sigmoid_gpu(loc_data[offset_loc + 0])) / width; float cy = (h + 
sigmoid_gpu(loc_data[offset_loc + 1])) / height; float hw = exp(loc_data[offset_loc + 2])* anchor_data[2 * c] / width * 0.5; float hh = exp(loc_data[offset_loc + 3]) * anchor_data[2 * c + 1] / height * 0.5; for (int k = 0; k < num_classes; ++k) { float prob = (cls_data[offset_cls + k] * scale > confidence_threshold ? cls_data[offset_cls + k] * scale : 0); // printf("%f %f | ",prob,cls_data[offset_cls + k] * scale); res_cls_data[k*width*height*num_anchors+i]=prob; } res_box_data[i * box_block + 0] = cx - hw; res_box_data[i * box_block + 1] = cy - hh; res_box_data[i * box_block + 2] = cx + hw; res_box_data[i * box_block + 3] = cy + hh; if (with_ori) { int offset_ori = ((h * width + w) * num_anchors + c) * 2; res_box_data[i*box_block+4]=atan2(ori_data[offset_ori+1],ori_data[offset_ori]); } if (with_dim) { int offset_dim = ((h * width + w) * num_anchors + c) * 3; res_box_data[i*box_block+5]=dim_data[offset_dim + 0]; res_box_data[i*box_block+6]=dim_data[offset_dim + 1]; res_box_data[i*box_block+7]=dim_data[offset_dim + 2]; } if (with_lof) { int offset_lof = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 8; auto &&src_ptr = lof_data + offset_lof; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } if (with_lor) { int offset_lor = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 12; auto &&src_ptr = lor_data + offset_lor; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } } } void GetObjectsGPU(int n, const float *loc_data, const float *obj_data, const float *cls_data, const float 
*ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { const int thread_size = 512; int block_size = (n + thread_size -1) / thread_size; { get_object_kernel << < block_size, thread_size >> > (n, loc_data, obj_data, cls_data, ori_data, dim_data, lof_data, lor_data, anchor_data, width, height, num_anchors, num_classes, confidence_threshold, with_ori, with_dim, with_lof, with_lor, res_box_data, res_cls_data, s_box_block_size); } cudaDeviceSynchronize(); } } // namespace jmc_auto } // namespace perception
4e3aa3ad92db9c4bb0ad1cc9513f29a8374ca52f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file main.cu * \brief Single-precision A*X plus Y (SAXPY) implementation in CUDA. */ #include <algorithm> #include <cassert> #include <iostream> #include <vector> /** * \brief SAXPY Kernel. * * Calculates Y = a*X + Y. */ __global__ void saxpy(const float a, const float *X, float *Y, size_t N) { // Compute global index int i = blockDim.x * blockIdx.x + threadIdx.x; // Compute SAXPY if we are inside of bounds if (i < N) Y[i] = a * X[i] + Y[i]; } /** * \brief Program entry-point. */ int main(int argc, char **argv) { if (argc < 2) { std::fprintf(stderr, "Error: missing command-line parameter\n"); std::exit(EXIT_FAILURE); } size_t N = atol(argv[1]); size_t T = (argc > 2) ? atol(argv[2]) : 128; size_t B = (N + T - 1) / T; float *d_X, *d_Y; float ms; hipEvent_t start, stop; // Sanity checks assert(N > 0); assert(T >= 32); // Host vectors, N elements initialized to 1 std::vector<float> h_X(N, 1); std::vector<float> h_Y(N, 1); // Instantiate things hipEventCreate(&start); hipEventCreate(&stop); hipMalloc(&d_X, N * sizeof(float)); hipMalloc(&d_Y, N * sizeof(float)); // Copy X and Y to the GPU hipMemcpy(d_X, h_X.data(), N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_Y, h_Y.data(), N * sizeof(float), hipMemcpyHostToDevice); // Launch kernel hipEventRecord(start); hipLaunchKernelGGL(( saxpy), dim3(B), dim3(T), 0, 0, 10, d_X, d_Y, N); hipEventRecord(stop); // Copy output data hipMemcpy(h_Y.data(), d_Y, N * sizeof(float), hipMemcpyDeviceToHost); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); // Count how many elements match auto matches = std::count_if(h_Y.begin(), h_Y.end(), [](float x) { return x == 11; }); std::printf("Elements matching = %d\n", matches); std::printf("Elapsed time (ms) = %g\n", ms); std::printf("Effective bandwidth (GB/s) = %g\n", N*4*3/ms/1e6); std::printf("Throughput (GFLOP/s) = %g\n", 2*N/ms/1e6); // Cleanup the mess hipFree(d_X); 
hipFree(d_Y); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
4e3aa3ad92db9c4bb0ad1cc9513f29a8374ca52f.cu
/** * \file main.cu * \brief Single-precision A*X plus Y (SAXPY) implementation in CUDA. */ #include <algorithm> #include <cassert> #include <iostream> #include <vector> /** * \brief SAXPY Kernel. * * Calculates Y = a*X + Y. */ __global__ void saxpy(const float a, const float *X, float *Y, size_t N) { // Compute global index int i = blockDim.x * blockIdx.x + threadIdx.x; // Compute SAXPY if we are inside of bounds if (i < N) Y[i] = a * X[i] + Y[i]; } /** * \brief Program entry-point. */ int main(int argc, char **argv) { if (argc < 2) { std::fprintf(stderr, "Error: missing command-line parameter\n"); std::exit(EXIT_FAILURE); } size_t N = atol(argv[1]); size_t T = (argc > 2) ? atol(argv[2]) : 128; size_t B = (N + T - 1) / T; float *d_X, *d_Y; float ms; cudaEvent_t start, stop; // Sanity checks assert(N > 0); assert(T >= 32); // Host vectors, N elements initialized to 1 std::vector<float> h_X(N, 1); std::vector<float> h_Y(N, 1); // Instantiate things cudaEventCreate(&start); cudaEventCreate(&stop); cudaMalloc(&d_X, N * sizeof(float)); cudaMalloc(&d_Y, N * sizeof(float)); // Copy X and Y to the GPU cudaMemcpy(d_X, h_X.data(), N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_Y, h_Y.data(), N * sizeof(float), cudaMemcpyHostToDevice); // Launch kernel cudaEventRecord(start); saxpy<<<B, T>>>(10, d_X, d_Y, N); cudaEventRecord(stop); // Copy output data cudaMemcpy(h_Y.data(), d_Y, N * sizeof(float), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); // Count how many elements match auto matches = std::count_if(h_Y.begin(), h_Y.end(), [](float x) { return x == 11; }); std::printf("Elements matching = %d\n", matches); std::printf("Elapsed time (ms) = %g\n", ms); std::printf("Effective bandwidth (GB/s) = %g\n", N*4*3/ms/1e6); std::printf("Throughput (GFLOP/s) = %g\n", 2*N/ms/1e6); // Cleanup the mess cudaFree(d_X); cudaFree(d_Y); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
644918f3355ee33912a8f0e61edc7bbd05b3d12b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" template <typename T> __global__ void kernelgpuInitu2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i<ng) { int j = i%npe; int k = (i-j)/npe; T xdg1 = xdg[j+npe*0+npe*ncx*k]; T xdg2 = xdg[j+npe*1+npe*ncx*k]; f[j+npe*0+npe*nce*k] = 0.0; i += blockDim.x * gridDim.x; } } template <typename T> void gpuInitu2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne) { int blockDim = 256; int gridDim = (ng + blockDim - 1) / blockDim; gridDim = (gridDim>1024)? 1024 : gridDim; hipLaunchKernelGGL(( kernelgpuInitu2), dim3(gridDim), dim3(blockDim), 0, 0, f, xdg, uinf, param, modelnumber, ng, ncx, nce, npe, ne); } template void gpuInitu2(double *, double *, double *, double *, int, int, int, int, int, int); template void gpuInitu2(float *, float *, float *, float *, int, int, int, int, int, int);
644918f3355ee33912a8f0e61edc7bbd05b3d12b.cu
template <typename T> __global__ void kernelgpuInitu2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i<ng) { int j = i%npe; int k = (i-j)/npe; T xdg1 = xdg[j+npe*0+npe*ncx*k]; T xdg2 = xdg[j+npe*1+npe*ncx*k]; f[j+npe*0+npe*nce*k] = 0.0; i += blockDim.x * gridDim.x; } } template <typename T> void gpuInitu2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne) { int blockDim = 256; int gridDim = (ng + blockDim - 1) / blockDim; gridDim = (gridDim>1024)? 1024 : gridDim; kernelgpuInitu2<<<gridDim, blockDim>>>(f, xdg, uinf, param, modelnumber, ng, ncx, nce, npe, ne); } template void gpuInitu2(double *, double *, double *, double *, int, int, int, int, int, int); template void gpuInitu2(float *, float *, float *, float *, int, int, int, int, int, int);
c3bece8d159d5dc5d398b4ef668741300f264aa1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #define N 100 // total number of items in vectors #define nthreads 4 // total number of threads in a block __global__ void estimatepi(int n, int *sum) { __shared__ int counter[nthreads]; int threadID; threadID = blockIdx.x * blockDim.x + threadIdx.x; unsigned int seed = threadID; hiprandState_t s; hiprand_init(seed, 0, 0, &s); if(threadID < n){ double x, y, diff, angle; int t; counter[threadIdx.x] = 0; for (t = 0; t<n; t++){ x = hiprand_uniform(&s); //hiprand y = hiprand_uniform(&s); //hiprand while(x*x + y*y > 1){ x = hiprand_uniform(&s); //hiprand y = hiprand_uniform(&s); //rand } angle = atan2 ( y, x ); //use inverse tan; diff = hiprand_uniform(&s); if(diff <= sin (angle) *2){ counter[threadIdx.x] = counter[threadIdx.x] + 1; } } if(threadIdx.x == 0){ sum[blockIdx.x] = 0; for(int i=0; i<nthreads; i++) { sum[blockIdx.x] = sum[blockIdx.x] + counter[i]; } } } } int main() { srand(time(NULL)); int *sum_h; int *sum_d; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; sum_h = (int*)malloc( N* sizeof(int)); hipMalloc((void**)&sum_d, N * sizeof(int)); int nblocks = (N + nthreads - 1)/nthreads; hipEventRecord(start); hipLaunchKernelGGL(( estimatepi), dim3(nblocks),dim3(nthreads), 0, 0, N,sum_d); hipEventRecord(stop); hipMemcpy(sum_h, sum_d, N * sizeof(int), hipMemcpyDeviceToHost); int success = 0; for(int i = 0; i < nblocks; i++){ success = sum_h[i] + success; } printf("trials === %d", N * nblocks * nthreads ); printf(" success === %d\n", success); double pi_estimate = 2 * N * nthreads * nblocks/( double )success; printf("pi_estimate == %f", pi_estimate); hipEventElapsedTime(&milliseconds, start, stop); printf("elaspsed = %f ms", milliseconds); printf("\n"); hipFree(sum_d); free(sum_h); }
c3bece8d159d5dc5d398b4ef668741300f264aa1.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include <curand_kernel.h> #define N 100 // total number of items in vectors #define nthreads 4 // total number of threads in a block __global__ void estimatepi(int n, int *sum) { __shared__ int counter[nthreads]; int threadID; threadID = blockIdx.x * blockDim.x + threadIdx.x; unsigned int seed = threadID; curandState s; curand_init(seed, 0, 0, &s); if(threadID < n){ double x, y, diff, angle; int t; counter[threadIdx.x] = 0; for (t = 0; t<n; t++){ x = curand_uniform(&s); //curand y = curand_uniform(&s); //curand while(x*x + y*y > 1){ x = curand_uniform(&s); //curand y = curand_uniform(&s); //rand } angle = atan2 ( y, x ); //use inverse tan; diff = curand_uniform(&s); if(diff <= sin (angle) *2){ counter[threadIdx.x] = counter[threadIdx.x] + 1; } } if(threadIdx.x == 0){ sum[blockIdx.x] = 0; for(int i=0; i<nthreads; i++) { sum[blockIdx.x] = sum[blockIdx.x] + counter[i]; } } } } int main() { srand(time(NULL)); int *sum_h; int *sum_d; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; sum_h = (int*)malloc( N* sizeof(int)); cudaMalloc((void**)&sum_d, N * sizeof(int)); int nblocks = (N + nthreads - 1)/nthreads; cudaEventRecord(start); estimatepi<<<nblocks,nthreads>>>(N,sum_d); cudaEventRecord(stop); cudaMemcpy(sum_h, sum_d, N * sizeof(int), cudaMemcpyDeviceToHost); int success = 0; for(int i = 0; i < nblocks; i++){ success = sum_h[i] + success; } printf("trials === %d", N * nblocks * nthreads ); printf(" success === %d\n", success); double pi_estimate = 2 * N * nthreads * nblocks/( double )success; printf("pi_estimate == %f", pi_estimate); cudaEventElapsedTime(&milliseconds, start, stop); printf("elaspsed = %f ms", milliseconds); printf("\n"); cudaFree(sum_d); free(sum_h); }
b314fc2006b36cf6cc03d7518e7bee8ed127a9ce.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "twiddleRealKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *wr = NULL; hipMalloc(&wr, XSIZE*YSIZE); float *w = NULL; hipMalloc(&w, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( twiddleRealKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, wr,w,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( twiddleRealKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, wr,w,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( twiddleRealKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, wr,w,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b314fc2006b36cf6cc03d7518e7bee8ed127a9ce.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "twiddleRealKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *wr = NULL; cudaMalloc(&wr, XSIZE*YSIZE); float *w = NULL; cudaMalloc(&w, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); twiddleRealKernel<<<gridBlock,threadBlock>>>(wr,w,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { twiddleRealKernel<<<gridBlock,threadBlock>>>(wr,w,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { twiddleRealKernel<<<gridBlock,threadBlock>>>(wr,w,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e6c60bc5e73ad5a446edd1bc7c7145aa217da3b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "1dfft.hpp" using namespace std; /* Calcuate FFT with cuFTT */ float fft_cuda(const double* idata, double* odata, int Nx) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /* Allocate memory for data on device, then copy data */ double *idata_c, *odata_c; hipfftDoubleComplex *idata_cx, *odata_cx; hipMalloc(&odata_c, sizeof(double) * Nx); hipMalloc(&idata_c, sizeof(double) * Nx); hipMalloc(&idata_cx, sizeof(hipfftDoubleComplex) * Nx); hipMalloc(&odata_cx, sizeof(hipfftDoubleComplex) * Nx); hipMemcpy(idata_c, idata, sizeof(double) * Nx, hipMemcpyHostToDevice); /* Convert data into hipfftDoubleComplex */ /* set 1 block with 256 threads */ hipLaunchKernelGGL(( real2complex), dim3(1), dim3(8), 0, 0, idata_c, idata_cx, Nx); hipDeviceSynchronize(); /* FFT Plans */ hipfftHandle plan; hipfftPlan1d(&plan, Nx, HIPFFT_Z2Z, 1); // auto start = chrono::high_resolution_clock::now(); /* Forward FFT */ hipEventRecord(start); hipfftExecZ2Z(plan, idata_cx, odata_cx, HIPFFT_FORWARD); hipEventRecord(stop); hipEventSynchronize(stop); /* stop the time */ /* std::chrono::_V2::system_clock::time_point finish */ float duration = 0; // milliseconds hipEventElapsedTime(&duration, start, stop); /* Convert cufft back to double array */ /* set 1 block with 8 threads */ hipLaunchKernelGGL(( complex2real), dim3(1), dim3(8), 0, 0, odata_cx, odata_c, Nx); hipDeviceSynchronize(); hipMemcpy(odata, odata_c, sizeof(double)*Nx, hipMemcpyDeviceToHost); hipfftDestroy(plan); hipFree(idata_c); hipFree(idata_cx); hipFree(odata_c); return duration; } /* convert a double array to cuffComplex data type. 
Imaginary parts are * set to 0 */ __global__ void real2complex(double *f, hipfftDoubleComplex *fc, int N) { /* Assume 1D grid of 1D blocks */ int index = blockIdx.x *blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; while (index < N) { fc[index].x = f[index]; fc[index].y = 0; index += stride; } } /* convert a cuffComplex data type to a double array. */ __global__ void complex2real(hipfftDoubleComplex *fc, double *f, int N) { /* Assume 1D grid of 1D blocks */ int index = blockIdx.x *blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; while (index < N) { f[index] = fc[index].x; index += stride; } }
e6c60bc5e73ad5a446edd1bc7c7145aa217da3b6.cu
#include "1dfft.hpp" using namespace std; /* Calcuate FFT with cuFTT */ float fft_cuda(const double* idata, double* odata, int Nx) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /* Allocate memory for data on device, then copy data */ double *idata_c, *odata_c; cufftDoubleComplex *idata_cx, *odata_cx; cudaMalloc(&odata_c, sizeof(double) * Nx); cudaMalloc(&idata_c, sizeof(double) * Nx); cudaMalloc(&idata_cx, sizeof(cufftDoubleComplex) * Nx); cudaMalloc(&odata_cx, sizeof(cufftDoubleComplex) * Nx); cudaMemcpy(idata_c, idata, sizeof(double) * Nx, cudaMemcpyHostToDevice); /* Convert data into cufftDoubleComplex */ /* set 1 block with 256 threads */ real2complex<<<1, 8>>>(idata_c, idata_cx, Nx); cudaDeviceSynchronize(); /* FFT Plans */ cufftHandle plan; cufftPlan1d(&plan, Nx, CUFFT_Z2Z, 1); // auto start = chrono::high_resolution_clock::now(); /* Forward FFT */ cudaEventRecord(start); cufftExecZ2Z(plan, idata_cx, odata_cx, CUFFT_FORWARD); cudaEventRecord(stop); cudaEventSynchronize(stop); /* stop the time */ /* std::chrono::_V2::system_clock::time_point finish */ float duration = 0; // milliseconds cudaEventElapsedTime(&duration, start, stop); /* Convert cufft back to double array */ /* set 1 block with 8 threads */ complex2real<<<1, 8>>>(odata_cx, odata_c, Nx); cudaDeviceSynchronize(); cudaMemcpy(odata, odata_c, sizeof(double)*Nx, cudaMemcpyDeviceToHost); cufftDestroy(plan); cudaFree(idata_c); cudaFree(idata_cx); cudaFree(odata_c); return duration; } /* convert a double array to cuffComplex data type. Imaginary parts are * set to 0 */ __global__ void real2complex(double *f, cufftDoubleComplex *fc, int N) { /* Assume 1D grid of 1D blocks */ int index = blockIdx.x *blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; while (index < N) { fc[index].x = f[index]; fc[index].y = 0; index += stride; } } /* convert a cuffComplex data type to a double array. 
*/ __global__ void complex2real(cufftDoubleComplex *fc, double *f, int N) { /* Assume 1D grid of 1D blocks */ int index = blockIdx.x *blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; while (index < N) { f[index] = fc[index].x; index += stride; } }
554543536b575876efbe3b342382e1265a2f76e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include <stdlib.h> #include <stdio.h> #include <string.h> __global__ void im2colOnDevice(unsigned int n, float* matAc, float* matA, int radiusF, int countF, int L, int M, int K, int C, int H) { for (int idx = blockIdx.x*blockDim.x+threadIdx.x; idx < n; idx += blockDim.x*gridDim.x) { int m = (idx/C)/L; int l = (idx/C)%L; int r = idx%C; if (m < M) { int w = m+radiusF; if (l < L) { int h = l+radiusF; for (int q = 0, oq = -1*radiusF; oq <= radiusF; q++, oq++) { for (int p = 0, op = -1*radiusF; op <= radiusF; p++, op++) { if (r < C) { matAc[(r+C*(p+K*q))+countF*(l+L*m)] = matA[r+C*((h+op)+H*(w+oq))]; } } } } } } } __global__ void gemm_gpu(double* a, double* b, double* c, int m, int n, int k) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; double tmp = 0; if (col < k && row < m) { for (int i = 0; i < n; i++) { tmp += a[row*n+i]*b[i*k+col]; } c[row*k+col] = tmp; } } int main(int argc, char const* argv[]) { int W = 1024; int H = 1024; int C = 4; int K = C; int blockSize = 256; int gridSize = 0; float time_gpu; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int L = H-(K-1); int M = W-(K-1); int KERNELS_NUM = L*M*C; int countA = H*W*C; const size_t sizeA = countA*sizeof(float); int radiusF = (K-1)/2; int countF = K*K*C; int countLR = L*M; int countAc = countF*countLR; const size_t sizeAc = countAc*sizeof(float); float* matA = (float*)malloc(sizeA); srand((unsigned)time(0)); for (int i = 0; i < countA; i++) { matA[i] = rand()%10; } float* devA, *devAc, *retAc; hipMalloc((void**)&devA, sizeA); hipMalloc((void**)&devAc, sizeAc); retAc = (float*)malloc(sizeAc); hipMemcpy(devA, matA, sizeA, hipMemcpyHostToDevice); if (gridSize == 0) gridSize = (KERNELS_NUM+blockSize-1)/blockSize; hipEventRecord(start, 0); hipLaunchKernelGGL(( im2colOnDevice) , dim3(gridSize), dim3(blockSize) , 0, 0, KERNELS_NUM, devAc, 
devA, radiusF, countF, L, M, K, C, H); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time_gpu, start, stop); hipMemcpy(retAc, devAc, sizeAc, hipMemcpyDeviceToHost); printf("%f ms", time_gpu); hipFree(devA); hipFree(devAc); free(matA); free(retAc); return 0; }
554543536b575876efbe3b342382e1265a2f76e9.cu
#include <fstream> #include <stdlib.h> #include <stdio.h> #include <string.h> __global__ void im2colOnDevice(unsigned int n, float* matAc, float* matA, int radiusF, int countF, int L, int M, int K, int C, int H) { for (int idx = blockIdx.x*blockDim.x+threadIdx.x; idx < n; idx += blockDim.x*gridDim.x) { int m = (idx/C)/L; int l = (idx/C)%L; int r = idx%C; if (m < M) { int w = m+radiusF; if (l < L) { int h = l+radiusF; for (int q = 0, oq = -1*radiusF; oq <= radiusF; q++, oq++) { for (int p = 0, op = -1*radiusF; op <= radiusF; p++, op++) { if (r < C) { matAc[(r+C*(p+K*q))+countF*(l+L*m)] = matA[r+C*((h+op)+H*(w+oq))]; } } } } } } } __global__ void gemm_gpu(double* a, double* b, double* c, int m, int n, int k) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; double tmp = 0; if (col < k && row < m) { for (int i = 0; i < n; i++) { tmp += a[row*n+i]*b[i*k+col]; } c[row*k+col] = tmp; } } int main(int argc, char const* argv[]) { int W = 1024; int H = 1024; int C = 4; int K = C; int blockSize = 256; int gridSize = 0; float time_gpu; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int L = H-(K-1); int M = W-(K-1); int KERNELS_NUM = L*M*C; int countA = H*W*C; const size_t sizeA = countA*sizeof(float); int radiusF = (K-1)/2; int countF = K*K*C; int countLR = L*M; int countAc = countF*countLR; const size_t sizeAc = countAc*sizeof(float); float* matA = (float*)malloc(sizeA); srand((unsigned)time(0)); for (int i = 0; i < countA; i++) { matA[i] = rand()%10; } float* devA, *devAc, *retAc; cudaMalloc((void**)&devA, sizeA); cudaMalloc((void**)&devAc, sizeAc); retAc = (float*)malloc(sizeAc); cudaMemcpy(devA, matA, sizeA, cudaMemcpyHostToDevice); if (gridSize == 0) gridSize = (KERNELS_NUM+blockSize-1)/blockSize; cudaEventRecord(start, 0); im2colOnDevice <<<gridSize, blockSize >>> (KERNELS_NUM, devAc, devA, radiusF, countF, L, M, K, C, H); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&time_gpu, start, stop); cudaMemcpy(retAc, devAc, sizeAc, cudaMemcpyDeviceToHost); printf("共用时间%f ms", time_gpu); cudaFree(devA); cudaFree(devAc); free(matA); free(retAc); return 0; }
80b0a42ebb524a3f43c384697afff28a5283d1fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void updateEst(int N, int M, float beta2, float scale, float *PARAMS, float *AVG, float *EST) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = j*N + i; float beta2a = __fsub_rn(1.0, beta2); if (i < N && j < M) { //AVG[index] = beta2*AVG[index] + (1.0-beta2)*PARAMS[index]; //EST[index] = scale*AVG[index]; AVG[index] = __fmaf_rn(beta2a,PARAMS[index],__fmul_rn(beta2,AVG[index])); EST[index] = __fmul_rn(scale, AVG[index]); } }
80b0a42ebb524a3f43c384697afff28a5283d1fc.cu
#include "includes.h" extern "C" { } __global__ void updateEst(int N, int M, float beta2, float scale, float *PARAMS, float *AVG, float *EST) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = j*N + i; float beta2a = __fsub_rn(1.0, beta2); if (i < N && j < M) { //AVG[index] = beta2*AVG[index] + (1.0-beta2)*PARAMS[index]; //EST[index] = scale*AVG[index]; AVG[index] = __fmaf_rn(beta2a,PARAMS[index],__fmul_rn(beta2,AVG[index])); EST[index] = __fmul_rn(scale, AVG[index]); } }
f4dbacfe83bed238a0d1cd4b8ba87cb62ec230e8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <sys/mman.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <chrono> #include "benchmark/benchmark.h" int readMemory(uint32_t* mem, int length) { int i = 0; int sum = 0; while (length--) { sum += mem[i++]; } return sum; } void writeMemory(uint32_t* mem, int length) { int i = 0; int v = 0; while (length--) { mem[i++] = v++; } } const int kItems = 10000000; const int kMemSize = kItems * sizeof(int); void fillMemory(uint32_t* p) { for (int i = 0; i < kItems; ++i) { p[i] = 1; } } #define check(x) { if ((x) != kItems) state.SkipWithError("Validation fail"); } static void BM_malloc_read(benchmark::State& state) { uint32_t* p = (uint32_t*)malloc(kMemSize); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); free(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_malloc_write(benchmark::State& state) { uint32_t* p = (uint32_t*)malloc(kMemSize); while(state.KeepRunning()) writeMemory(p, kItems); free(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mmap_read(benchmark::State& state) { uint32_t* p = (uint32_t*)mmap( 0, kMemSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED | MAP_ANONYMOUS, -1, 0); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); munmap(p, kMemSize); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mmap_write(benchmark::State& state) { uint32_t* p = (uint32_t*)mmap( 0, kMemSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED | MAP_ANONYMOUS, -1, 0); while(state.KeepRunning()) writeMemory(p, kItems); munmap(p, kMemSize); state.SetBytesProcessed(state.iterations() * kMemSize); } #define cudaSafeCall(x) if ((x)) { \ state.SkipWithError(hipGetErrorString(hipGetLastError())); \ return; \ } static void BM_cuda_copy_h2d(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; 
cudaSafeCall(hipMalloc(&dev_p, kMemSize)); fillMemory(host_p); while (state.KeepRunning()) { hipMemcpy(dev_p, host_p, kMemSize, hipMemcpyHostToDevice); } hipFree(dev_p); free(host_p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_cuda_copy_d2h(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; cudaSafeCall(hipMalloc(&dev_p, kMemSize)); fillMemory(host_p); hipMemcpy(dev_p, host_p, kMemSize, hipMemcpyHostToDevice); while (state.KeepRunning()) { hipMemcpy(host_p, dev_p, kMemSize, hipMemcpyDeviceToHost); } hipFree(dev_p); free(host_p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_cuda_copy_h2d_async(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; cudaSafeCall(hipMalloc(&dev_p, kMemSize)); fillMemory(host_p); while (state.KeepRunning()) { auto start = std::chrono::high_resolution_clock::now(); hipMemcpyAsync(dev_p, host_p, kMemSize, hipMemcpyHostToDevice); auto end = std::chrono::high_resolution_clock::now(); hipDeviceSynchronize(); state.SetIterationTime( std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count()); } hipFree(dev_p); free(host_p); } static void BM_cuda_copy_d2h_async(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; cudaSafeCall(hipMalloc(&dev_p, kMemSize)); fillMemory(host_p); hipMemcpy(dev_p, host_p, kMemSize, hipMemcpyHostToDevice); while (state.KeepRunning()) { auto start = std::chrono::high_resolution_clock::now(); hipMemcpyAsync(host_p, dev_p, kMemSize, hipMemcpyDeviceToHost); auto end = std::chrono::high_resolution_clock::now(); hipDeviceSynchronize(); state.SetIterationTime( std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count()); } hipFree(dev_p); free(host_p); } static void BM_cuda_malloc_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipHostMalloc(&p, kMemSize)); fillMemory(p); while(state.KeepRunning()) 
check(readMemory(p, kItems)); hipHostFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_cuda_malloc_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipHostMalloc(&p, kMemSize)); while(state.KeepRunning()) writeMemory(p, kItems); hipHostFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_pinned_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipHostMalloc(&p, kMemSize, hipHostMallocDefault)); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); hipHostFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_pinned_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipHostMalloc(&p, kMemSize, hipHostMallocDefault)); while(state.KeepRunning()) writeMemory(p, kItems); hipHostFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mapped_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipHostMalloc(&p, kMemSize, hipHostMallocMapped)); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); hipHostFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mapped_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipHostMalloc(&p, kMemSize, hipHostMallocMapped)); while(state.KeepRunning()) writeMemory(p, kItems); hipHostFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_managed_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipMallocManaged(&p, kMemSize)); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); hipFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_managed_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(hipMallocManaged(&p, kMemSize)); while(state.KeepRunning()) writeMemory(p, kItems); hipFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } BENCHMARK(BM_malloc_read); BENCHMARK(BM_malloc_write); /* BENCHMARK(BM_mmap_read); */ /* 
BENCHMARK(BM_mmap_write); */ BENCHMARK(BM_cuda_copy_h2d); BENCHMARK(BM_cuda_copy_d2h); BENCHMARK(BM_cuda_copy_h2d_async)->UseManualTime(); BENCHMARK(BM_cuda_copy_d2h_async)->UseManualTime(); BENCHMARK(BM_cuda_malloc_read); BENCHMARK(BM_cuda_malloc_write); BENCHMARK(BM_pinned_read); BENCHMARK(BM_pinned_write); BENCHMARK(BM_mapped_read); BENCHMARK(BM_mapped_write); BENCHMARK(BM_managed_read); BENCHMARK(BM_managed_write); int main(int argc, char** argv) { benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); return 0; }
f4dbacfe83bed238a0d1cd4b8ba87cb62ec230e8.cu
#include <cuda_runtime.h> #include <sys/mman.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <chrono> #include "benchmark/benchmark.h" int readMemory(uint32_t* mem, int length) { int i = 0; int sum = 0; while (length--) { sum += mem[i++]; } return sum; } void writeMemory(uint32_t* mem, int length) { int i = 0; int v = 0; while (length--) { mem[i++] = v++; } } const int kItems = 10000000; const int kMemSize = kItems * sizeof(int); void fillMemory(uint32_t* p) { for (int i = 0; i < kItems; ++i) { p[i] = 1; } } #define check(x) { if ((x) != kItems) state.SkipWithError("Validation fail"); } static void BM_malloc_read(benchmark::State& state) { uint32_t* p = (uint32_t*)malloc(kMemSize); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); free(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_malloc_write(benchmark::State& state) { uint32_t* p = (uint32_t*)malloc(kMemSize); while(state.KeepRunning()) writeMemory(p, kItems); free(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mmap_read(benchmark::State& state) { uint32_t* p = (uint32_t*)mmap( 0, kMemSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED | MAP_ANONYMOUS, -1, 0); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); munmap(p, kMemSize); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mmap_write(benchmark::State& state) { uint32_t* p = (uint32_t*)mmap( 0, kMemSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED | MAP_ANONYMOUS, -1, 0); while(state.KeepRunning()) writeMemory(p, kItems); munmap(p, kMemSize); state.SetBytesProcessed(state.iterations() * kMemSize); } #define cudaSafeCall(x) if ((x)) { \ state.SkipWithError(cudaGetErrorString(cudaGetLastError())); \ return; \ } static void BM_cuda_copy_h2d(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; cudaSafeCall(cudaMalloc(&dev_p, kMemSize)); fillMemory(host_p); while 
(state.KeepRunning()) { cudaMemcpy(dev_p, host_p, kMemSize, cudaMemcpyHostToDevice); } cudaFree(dev_p); free(host_p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_cuda_copy_d2h(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; cudaSafeCall(cudaMalloc(&dev_p, kMemSize)); fillMemory(host_p); cudaMemcpy(dev_p, host_p, kMemSize, cudaMemcpyHostToDevice); while (state.KeepRunning()) { cudaMemcpy(host_p, dev_p, kMemSize, cudaMemcpyDeviceToHost); } cudaFree(dev_p); free(host_p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_cuda_copy_h2d_async(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; cudaSafeCall(cudaMalloc(&dev_p, kMemSize)); fillMemory(host_p); while (state.KeepRunning()) { auto start = std::chrono::high_resolution_clock::now(); cudaMemcpyAsync(dev_p, host_p, kMemSize, cudaMemcpyHostToDevice); auto end = std::chrono::high_resolution_clock::now(); cudaDeviceSynchronize(); state.SetIterationTime( std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count()); } cudaFree(dev_p); free(host_p); } static void BM_cuda_copy_d2h_async(benchmark::State& state) { uint32_t* host_p = (uint32_t*)malloc(kMemSize); uint32_t* dev_p; cudaSafeCall(cudaMalloc(&dev_p, kMemSize)); fillMemory(host_p); cudaMemcpy(dev_p, host_p, kMemSize, cudaMemcpyHostToDevice); while (state.KeepRunning()) { auto start = std::chrono::high_resolution_clock::now(); cudaMemcpyAsync(host_p, dev_p, kMemSize, cudaMemcpyDeviceToHost); auto end = std::chrono::high_resolution_clock::now(); cudaDeviceSynchronize(); state.SetIterationTime( std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count()); } cudaFree(dev_p); free(host_p); } static void BM_cuda_malloc_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaMallocHost(&p, kMemSize)); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); cudaFreeHost(p); 
state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_cuda_malloc_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaMallocHost(&p, kMemSize)); while(state.KeepRunning()) writeMemory(p, kItems); cudaFreeHost(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_pinned_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaHostAlloc(&p, kMemSize, cudaHostAllocDefault)); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); cudaFreeHost(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_pinned_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaHostAlloc(&p, kMemSize, cudaHostAllocDefault)); while(state.KeepRunning()) writeMemory(p, kItems); cudaFreeHost(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mapped_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaHostAlloc(&p, kMemSize, cudaHostAllocMapped)); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); cudaFreeHost(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_mapped_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaHostAlloc(&p, kMemSize, cudaHostAllocMapped)); while(state.KeepRunning()) writeMemory(p, kItems); cudaFreeHost(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_managed_read(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaMallocManaged(&p, kMemSize)); fillMemory(p); while(state.KeepRunning()) check(readMemory(p, kItems)); cudaFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } static void BM_managed_write(benchmark::State& state) { uint32_t* p; cudaSafeCall(cudaMallocManaged(&p, kMemSize)); while(state.KeepRunning()) writeMemory(p, kItems); cudaFree(p); state.SetBytesProcessed(state.iterations() * kMemSize); } BENCHMARK(BM_malloc_read); BENCHMARK(BM_malloc_write); /* BENCHMARK(BM_mmap_read); */ /* BENCHMARK(BM_mmap_write); */ 
BENCHMARK(BM_cuda_copy_h2d); BENCHMARK(BM_cuda_copy_d2h); BENCHMARK(BM_cuda_copy_h2d_async)->UseManualTime(); BENCHMARK(BM_cuda_copy_d2h_async)->UseManualTime(); BENCHMARK(BM_cuda_malloc_read); BENCHMARK(BM_cuda_malloc_write); BENCHMARK(BM_pinned_read); BENCHMARK(BM_pinned_write); BENCHMARK(BM_mapped_read); BENCHMARK(BM_mapped_write); BENCHMARK(BM_managed_read); BENCHMARK(BM_managed_write); int main(int argc, char** argv) { benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); return 0; }
52880172a88e0583f32e30725d5255ec7d1213f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <time.h> #include <hiprand/hiprand.h> bool stream_init(hipStream_t* stream) { int* num = new int; hipGetDeviceCount(num); try { for (int i = 0;i < *num;i++) { hipStreamCreate(&stream[i]); } } catch (...) { return false; } delete num; return true; } bool stream_dispose(hipStream_t* stream) { int* num = new int; hipGetDeviceCount(num); try { for (int i = 0;i < *num;i++) { hipStreamDestroy(stream[i]); } } catch (...) { return false; } delete num; return true; } bool generateRandArray(float* numArray, int arraySize) { try { float* dev_a; srand(time(NULL)); hipSetDevice(0); hipMalloc((void**)&dev_a, arraySize * sizeof(int)); hiprandGenerator_t gen; hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL); hiprandGenerateUniform(gen, dev_a, arraySize); hipMemcpy(numArray, dev_a, arraySize * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_a); hiprandDestroyGenerator(gen); } catch (...) { return false; } return true; } bool askAboutMemory(int arraySize) { char ch; std::cout << "\nThis programm need more then " << arraySize * sizeof(float) / 1024. / 1024 / 1024 * 2 << " GB RAM. Continue?\n(Y/N)_"; std::cin >> ch; std::cout << std::endl; if (ch != 'y' && ch != 'Y') return false; return true; } void lazzzyArrayPrint(float* arr, int arrSize) { std::cout << '\n' << "Array len: " << arrSize << '\n' << "Array items:" << '\n' << std::endl; std::cout << arr[0]<< " " << arr[1] << " " << arr[2] << std::endl; std::cout << " ... " << std::endl; std::cout << arr[arrSize / 2 - 1] << " " << arr[arrSize/2] << " " << arr[arrSize / 2 + 1] << std::endl; std::cout << " ... 
" << std::endl; std::cout << arr[arrSize - 3] << " " << arr[arrSize - 2] << " " << arr[arrSize - 1] << '\n' << std::endl; } __global__ void _kernel(float* a) { int i = blockDim.x * blockIdx.x + threadIdx.x; int d; hipGetDevice(&d); printf("Kernel %d working\n", d); hipDeviceGetAttribute(&d, hipDeviceAttributePciBusId , d); a[i] = a[i] * a[i] + d; } int main() { // . int num; hipGetDeviceCount(&num); std::cout << "Detcted device count: " << num << '\n' << std::endl; for (int i = 0;i < num;i++) { // Query the device properties. hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); std::cout << "Device id: " << i << std::endl; std::cout << "Device name: " << prop.name << std::endl; } // . - . // - id hipStream_t *stream = new hipStream_t[num]; // if (!stream_init(stream)) return 1; const int arraySize = 2;// 1024 * 1024 * 512; const int sizePerSt = arraySize / num; /*if (!askAboutMemory(arraySize)) return 2;*/ float *a = new float[arraySize]; float** dev_a; if (arraySize % num == 0) { // - // dev_a = new float*[num]; for (int j = 0; j < num; j++) dev_a[j] = new float[arraySize]; } /*else { dev_a = new float* [num]; for (int j = 0; j < num; j++) dev_a[j] = new float[sizePerSt]; } */ if (!generateRandArray(a, arraySize)) return 1; lazzzyArrayPrint(a, arraySize); // dim3 threads = dim3(2); dim3 blocks = dim3(1); // try { for (int i = 0; i < num; i++) { hipSetDevice(i); hipMalloc((void**)&dev_a[i], arraySize * sizeof(float)); hipMemcpyAsync(dev_a[i], a, arraySize * sizeof(float), hipMemcpyHostToDevice); } } catch (...) { return 3; } // try { for (int i = 0; i < num; i++) { hipSetDevice(i); hipDeviceSynchronize(); int d; hipGetDevice(&d); hipDeviceGetAttribute(&d, hipDeviceAttributePciDeviceId, d); printf("Kernel %d started\n", d); hipLaunchKernelGGL(( _kernel) , dim3(blocks), dim3(threads), 0, stream[i], dev_a[i]); //printf("Kernel stoped\n"); } } catch (...) 
{ return 3; } // try { for (int i = 0; i < num; i++) { delete a; hipSetDevice(i); hipMemcpy(a, dev_a[i], arraySize * sizeof(float), hipMemcpyDeviceToHost); std::cout << "Theoretecly data from " << i << " device." << std::endl; lazzzyArrayPrint(a, arraySize); } } catch (...) { return 3; } hipFree(dev_a); //lazzzyArrayPrint(a, arraySize); // if (!stream_dispose(stream)) return 1; return 0; }
52880172a88e0583f32e30725d5255ec7d1213f9.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <time.h> #include <curand.h> bool stream_init(cudaStream_t* stream) { int* num = new int; cudaGetDeviceCount(num); try { for (int i = 0;i < *num;i++) { cudaStreamCreate(&stream[i]); } } catch (...) { return false; } delete num; return true; } bool stream_dispose(cudaStream_t* stream) { int* num = new int; cudaGetDeviceCount(num); try { for (int i = 0;i < *num;i++) { cudaStreamDestroy(stream[i]); } } catch (...) { return false; } delete num; return true; } bool generateRandArray(float* numArray, int arraySize) { try { float* dev_a; srand(time(NULL)); cudaSetDevice(0); cudaMalloc((void**)&dev_a, arraySize * sizeof(int)); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); curandGenerateUniform(gen, dev_a, arraySize); cudaMemcpy(numArray, dev_a, arraySize * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_a); curandDestroyGenerator(gen); } catch (...) { return false; } return true; } bool askAboutMemory(int arraySize) { char ch; std::cout << "\nThis programm need more then " << arraySize * sizeof(float) / 1024. / 1024 / 1024 * 2 << " GB RAM. Continue?\n(Y/N)_"; std::cin >> ch; std::cout << std::endl; if (ch != 'y' && ch != 'Y') return false; return true; } void lazzzyArrayPrint(float* arr, int arrSize) { std::cout << '\n' << "Array len: " << arrSize << '\n' << "Array items:" << '\n' << std::endl; std::cout << arr[0]<< " " << arr[1] << " " << arr[2] << std::endl; std::cout << " ... " << std::endl; std::cout << arr[arrSize / 2 - 1] << " " << arr[arrSize/2] << " " << arr[arrSize / 2 + 1] << std::endl; std::cout << " ... 
" << std::endl; std::cout << arr[arrSize - 3] << " " << arr[arrSize - 2] << " " << arr[arrSize - 1] << '\n' << std::endl; } __global__ void _kernel(float* a) { int i = blockDim.x * blockIdx.x + threadIdx.x; int d; cudaGetDevice(&d); printf("Kernel %d working\n", d); cudaDeviceGetAttribute(&d, cudaDevAttrPciBusId , d); a[i] = a[i] * a[i] + d; } int main() { // Разминочный блок. Определяет количество гпу и выводит их характеристики int num; cudaGetDeviceCount(&num); std::cout << "Detcted device count: " << num << '\n' << std::endl; for (int i = 0;i < num;i++) { // Query the device properties. cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); std::cout << "Device id: " << i << std::endl; std::cout << "Device name: " << prop.name << std::endl; } // Массив потоков. Каждый поток - это отдельная видеокарта. // Порядковый номер в массиве - id карты cudaStream_t *stream = new cudaStream_t[num]; // Иниацилизация потоков по одному потоку на каждое устройство if (!stream_init(stream)) return 1; const int arraySize = 2;// 1024 * 1024 * 512; const int sizePerSt = arraySize / num; /*if (!askAboutMemory(arraySize)) return 2;*/ float *a = new float[arraySize]; float** dev_a; if (arraySize % num == 0) { // Предпологаем что количество эл-тов кратно количеству гпу // Проверить что сработает создавать массив из массивов для разных ГПУ dev_a = new float*[num]; for (int j = 0; j < num; j++) dev_a[j] = new float[arraySize]; } /*else { dev_a = new float* [num]; for (int j = 0; j < num; j++) dev_a[j] = new float[sizePerSt]; } */ if (!generateRandArray(a, arraySize)) return 1; lazzzyArrayPrint(a, arraySize); //Подготовка к запуску ядра dim3 threads = dim3(2); dim3 blocks = dim3(1); // Подготовка и передач данных на карты try { for (int i = 0; i < num; i++) { cudaSetDevice(i); cudaMalloc((void**)&dev_a[i], arraySize * sizeof(float)); cudaMemcpyAsync(dev_a[i], a, arraySize * sizeof(float), cudaMemcpyHostToDevice); } } catch (...) 
{ return 3; } // Запуск ядра try { for (int i = 0; i < num; i++) { cudaSetDevice(i); cudaDeviceSynchronize(); int d; cudaGetDevice(&d); cudaDeviceGetAttribute(&d, cudaDevAttrPciDeviceId, d); printf("Kernel %d started\n", d); _kernel <<<blocks, threads, 0, stream[i]>>> (dev_a[i]); //printf("Kernel stoped\n"); } } catch (...) { return 3; } // получение данных обратно try { for (int i = 0; i < num; i++) { delete a; cudaSetDevice(i); cudaMemcpy(a, dev_a[i], arraySize * sizeof(float), cudaMemcpyDeviceToHost); std::cout << "Theoretecly data from " << i << " device." << std::endl; lazzzyArrayPrint(a, arraySize); } } catch (...) { return 3; } cudaFree(dev_a); //lazzzyArrayPrint(a, arraySize); // Убийство всех потоков if (!stream_dispose(stream)) return 1; return 0; }
b903919d3a66b8d92d04c2ba527bafdf46ee4ea1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define VERTICES 600 extern "C" { __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n); __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n); } /* * This file contains the implementation of a CUDA Kernel for the * point-in-polygon problem using the crossing number algorithm * * Simplified for use in the NLeSC GPU Course * * The algorithm used here is adapted from: * 'Inclusion of a Point in a Polygon', Dan Sunday, 2001 * (http://geomalgorithms.com/a03-_inclusion.html) * * Author: Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl> */ __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { // edge from vk to vj float2 vj = vertices[j]; float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && //if p is between vj and vk vertically (p.x < slope * (p.y-vj.y) + vj.x) ) { //if p.x crosses the line vk-vj when moved in positive x-direction c = !c; } } bitmap[i] = c; // 0 if even (out), and 1 if odd (in) } } int main() { hipSetDeviceFlags(hipDeviceMapHost); hipSetDevice(0); hipDeviceSynchronize(); hipError_t err; int stat; int num_points = (int)2e7; float2 *h_vertices; float2 *d_vertices; float2 *h_points; int *h_bitmap; int *h_reference; //Allocate pinned and aligned host memory and copy input data err = hipHostMalloc((void **)&h_vertices, VERTICES*sizeof(float2), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString(err)); } err = hipHostMalloc((void **)&h_points, num_points *sizeof(float2), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", 
hipGetErrorString(err)); } err = hipHostMalloc((void **)&h_bitmap, num_points *sizeof(int), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString(err)); } err = hipHostMalloc((void **)&h_reference, num_points *sizeof(int), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString(err)); } // generate random input for (int i=0; i< num_points; i++) { h_points[i].x = 50.0 / (rand() % 1000); h_points[i].y = 50.0 / (rand() % 1000); } // read vertices from disk FILE *file = fopen("vertices.dat", "rb"); stat = fread(h_vertices, sizeof(float), 2*VERTICES, file); if (stat < 2*VERTICES) { fprintf(stderr, "Error in fread()\n"); } // allocate device memory for storing the vertices err = hipMalloc((void **)&d_vertices, VERTICES*sizeof(float2)); if (err != hipSuccess) { fprintf(stderr, "Error in hipMalloc: %s\n", hipGetErrorString( err )); } // transfer vertices to d_vertices err = hipMemcpy(d_vertices, h_vertices, VERTICES*sizeof(float2), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Error in hipMemcpy: %s\n", hipGetErrorString(err)); } // create CUDA streams and events hipStream_t stream[1]; err = hipStreamCreate(&stream[0]); if (err != hipSuccess) { fprintf(stderr, "Error in hipStreamCreate: %s\n", hipGetErrorString(err)); } hipEvent_t start; err = hipEventCreate(&start); if (err != hipSuccess) { fprintf(stderr, "Error in hipEventCreate: %s\n", hipGetErrorString(err)); } hipEvent_t stop; err = hipEventCreate(&stop); if (err != hipSuccess) { fprintf(stderr, "Error in hipEventCreate: %s\n", hipGetErrorString(err)); } hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Error after memory setup: %s\n", hipGetErrorString(err)); } //kernel parameters dim3 threads(256, 1, 1); dim3 grid((int)ceil(num_points / (float)threads.x), 1); //run the kernel a few times to warmup the device for (int i=0; i<5; i++) { 
hipLaunchKernelGGL(( cn_pnpoly_reference_kernel), dim3(grid), dim3(threads), 0, stream[0], h_reference, h_points, d_vertices, num_points); } memset(h_bitmap, 0, num_points*sizeof(int)); //start measuring time hipDeviceSynchronize(); hipEventRecord(start, stream[0]); //call the kernel hipLaunchKernelGGL(( cn_pnpoly), dim3(grid), dim3(threads), 0, stream[0], h_bitmap, h_points, d_vertices, num_points); //stop time measurement hipEventRecord(stop, stream[0]); hipDeviceSynchronize(); float time = 0.0; hipEventElapsedTime(&time, start, stop); printf("cn_pnpoly kernel took: %f (ms)\n", time); //compute reference answer and measure time hipDeviceSynchronize(); hipEventRecord(start, stream[0]); hipLaunchKernelGGL(( cn_pnpoly_reference_kernel), dim3(grid), dim3(threads), 0, stream[0], h_reference, h_points, d_vertices, num_points); hipEventRecord(stop, stream[0]); hipDeviceSynchronize(); hipEventElapsedTime(&time, start, stop); printf("reference kernel took: %f (ms)\n", time); //cleanup hipStreamDestroy(stream[0]); hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_vertices); hipHostFree(h_vertices); hipHostFree(h_points); //final check for errors hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Error after CUDA kernel: %s\n", hipGetErrorString(err)); exit(1); } else { int zeros = 0; int errors = 0; int print = 0; for (int i=0; i<num_points; i++) { if (h_reference[i] == 0) { zeros++; } if (h_bitmap[i] != h_reference[i]) { errors++; if (print++ < 10) { fprintf(stderr, "error at %d, reference=%d, answer=%d\n", i, h_reference[i], h_bitmap[i]); } } } if (zeros == num_points) { printf("Error: reference output is only zeros\n"); } else { if (errors == 0) { printf("ok!\n"); } else { printf("there were %d errors\n", errors); } } } hipHostFree(h_bitmap); hipHostFree(h_reference); return 0; } /* * Reference kernel * * This kernel is kept for checking the output of the above kernel, PLEASE DO NOT MODIFY THIS KERNEL */ __global__ void 
cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; // DO NOT MODIFY THIS KERNEL int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { float2 vj = vertices[j]; // DO NOT MODIFY THIS KERNEL float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && (p.x < slope * (p.y-vj.y) + vj.x) ) { c = !c; } } bitmap[i] = c; // DO NOT MODIFY THIS KERNEL } }
b903919d3a66b8d92d04c2ba527bafdf46ee4ea1.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #define VERTICES 600 extern "C" { __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n); __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n); } /* * This file contains the implementation of a CUDA Kernel for the * point-in-polygon problem using the crossing number algorithm * * Simplified for use in the NLeSC GPU Course * * The algorithm used here is adapted from: * 'Inclusion of a Point in a Polygon', Dan Sunday, 2001 * (http://geomalgorithms.com/a03-_inclusion.html) * * Author: Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl> */ __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { // edge from vk to vj float2 vj = vertices[j]; float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && //if p is between vj and vk vertically (p.x < slope * (p.y-vj.y) + vj.x) ) { //if p.x crosses the line vk-vj when moved in positive x-direction c = !c; } } bitmap[i] = c; // 0 if even (out), and 1 if odd (in) } } int main() { cudaSetDeviceFlags(cudaDeviceMapHost); cudaSetDevice(0); cudaDeviceSynchronize(); cudaError_t err; int stat; int num_points = (int)2e7; float2 *h_vertices; float2 *d_vertices; float2 *h_points; int *h_bitmap; int *h_reference; //Allocate pinned and aligned host memory and copy input data err = cudaHostAlloc((void **)&h_vertices, VERTICES*sizeof(float2), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_points, num_points *sizeof(float2), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_bitmap, num_points 
*sizeof(int), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_reference, num_points *sizeof(int), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } // generate random input for (int i=0; i< num_points; i++) { h_points[i].x = 50.0 / (rand() % 1000); h_points[i].y = 50.0 / (rand() % 1000); } // read vertices from disk FILE *file = fopen("vertices.dat", "rb"); stat = fread(h_vertices, sizeof(float), 2*VERTICES, file); if (stat < 2*VERTICES) { fprintf(stderr, "Error in fread()\n"); } // allocate device memory for storing the vertices err = cudaMalloc((void **)&d_vertices, VERTICES*sizeof(float2)); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString( err )); } // transfer vertices to d_vertices err = cudaMemcpy(d_vertices, h_vertices, VERTICES*sizeof(float2), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaMemcpy: %s\n", cudaGetErrorString(err)); } // create CUDA streams and events cudaStream_t stream[1]; err = cudaStreamCreate(&stream[0]); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString(err)); } cudaEvent_t start; err = cudaEventCreate(&start); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err)); } cudaEvent_t stop; err = cudaEventCreate(&stop); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err)); } cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Error after memory setup: %s\n", cudaGetErrorString(err)); } //kernel parameters dim3 threads(256, 1, 1); dim3 grid((int)ceil(num_points / (float)threads.x), 1); //run the kernel a few times to warmup the device for (int i=0; i<5; i++) { cn_pnpoly_reference_kernel<<<grid, threads, 0, 
stream[0]>>>(h_reference, h_points, d_vertices, num_points); } memset(h_bitmap, 0, num_points*sizeof(int)); //start measuring time cudaDeviceSynchronize(); cudaEventRecord(start, stream[0]); //call the kernel cn_pnpoly<<<grid, threads, 0, stream[0]>>>(h_bitmap, h_points, d_vertices, num_points); //stop time measurement cudaEventRecord(stop, stream[0]); cudaDeviceSynchronize(); float time = 0.0; cudaEventElapsedTime(&time, start, stop); printf("cn_pnpoly kernel took: %f (ms)\n", time); //compute reference answer and measure time cudaDeviceSynchronize(); cudaEventRecord(start, stream[0]); cn_pnpoly_reference_kernel<<<grid, threads, 0, stream[0]>>>(h_reference, h_points, d_vertices, num_points); cudaEventRecord(stop, stream[0]); cudaDeviceSynchronize(); cudaEventElapsedTime(&time, start, stop); printf("reference kernel took: %f (ms)\n", time); //cleanup cudaStreamDestroy(stream[0]); cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_vertices); cudaFreeHost(h_vertices); cudaFreeHost(h_points); //final check for errors cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Error after CUDA kernel: %s\n", cudaGetErrorString(err)); exit(1); } else { int zeros = 0; int errors = 0; int print = 0; for (int i=0; i<num_points; i++) { if (h_reference[i] == 0) { zeros++; } if (h_bitmap[i] != h_reference[i]) { errors++; if (print++ < 10) { fprintf(stderr, "error at %d, reference=%d, answer=%d\n", i, h_reference[i], h_bitmap[i]); } } } if (zeros == num_points) { printf("Error: reference output is only zeros\n"); } else { if (errors == 0) { printf("ok!\n"); } else { printf("there were %d errors\n", errors); } } } cudaFreeHost(h_bitmap); cudaFreeHost(h_reference); return 0; } /* * Reference kernel * * This kernel is kept for checking the output of the above kernel, PLEASE DO NOT MODIFY THIS KERNEL */ __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + 
threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; // DO NOT MODIFY THIS KERNEL int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { float2 vj = vertices[j]; // DO NOT MODIFY THIS KERNEL float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && (p.x < slope * (p.y-vj.y) + vj.x) ) { c = !c; } } bitmap[i] = c; // DO NOT MODIFY THIS KERNEL } }
1df660fa0aed73f5cb984090f7f09701f5ff70d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include "para.h" struct kernel_para{ int *A, *B; volatile int *C; volatile int size; volatile int block; volatile int thread; volatile int warp; volatile int req; volatile int ready; volatile int funcId; volatile int taskId; volatile int doneHost; int doneGPU; }; struct kernel_para_GPU{ int warpId; int baseId; int taskId; }; __device__ void init_queue(struct kernel_para_GPU *warpPool){ int warpIdxx = (blockIdx.x*blockDim.x+threadIdx.x)/32; if((threadIdx.x) != 0){ warpPool[warpIdxx+threadIdx.x].warpId = 0; }else{ warpPool[warpIdxx+threadIdx.x].warpId = 1; } } __device__ void MatMul_kernel(int *A, int *B, int *C, int Size, int baseTid){ #if 1 int row = baseTid + (threadIdx.x & 0x1f); for (int j = 0; j < Size; j++){ int sum = 0; for (int k = 0; k < Size; k++){ int a = A[row * Size + k]; int b = B[k * Size + j]; sum += a * b; } C[row * Size + j] = sum; } #endif } __device__ void VecAdd_kernel(int *A, int *B, int *C, int size, int baseTid) { int i = baseTid + (threadIdx.x & 0x1f); //printf("In vec add with tid %d from block %d\n",i, blockIdx.x); // for(int j=0; j<200000; j++) if (i < size) C[i] = A[i] + B[i]; } __global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec, volatile int *totalScheTasks){ int warpIdxx = (blockIdx.x*blockDim.x + threadIdx.x)/32; int warp; int taskbufIter; int base; int taskbufId; int queuebufIter; int queuebufId; // Init warp queue contents and pointers #if 1 if(threadIdx.x < QSize){ init_queue(warpPool); warp = 0; taskbufIter = 0; queuebufIter = 0; base = 0; } __syncthreads(); #endif // scheduling in master warps if(threadIdx.x < 32) { if(threadIdx.x != 0 && threadIdx.x < (SBuf)){ while(!(*done)){ if(warp > 0){ if(warpPool[queuebufId].warpId == 0){ 
warpPool[queuebufId].taskId = taskBuffer[taskbufId].taskId; warpPool[queuebufId].baseId = base*32; warpPool[queuebufId].warpId = 1; warp--; base++; __threadfence_block(); if(warp == 0){ taskBuffer[taskbufId].req = 0; base = 0; } }// End if (warpQ->contents) }else{ taskbufId = (blockIdx.x*SBuf+threadIdx.x)+(taskbufIter*BSize*SBuf); queuebufId = (blockIdx.x*SBuf+threadIdx.x)+(queuebufIter*BSize*SBuf); taskbufIter++; queuebufIter++; if(taskbufIter == SRun) taskbufIter = 0; if(queuebufIter == QRun) queuebufIter = 0; if(taskBuffer[taskbufId].ready == 1 && !(*done)){ taskBuffer[taskbufId].ready = 0; warp = taskBuffer[taskbufId].warp; } } // end if warp > 0 }// End while done }// End if(threadIdx.x< QSize) }//End if(threadIdx.x < 32) #if 1 else{ #if 1 while(!(*exec)){ if(*exec) return; if(warpPool[warpIdxx].warpId == 1 && !(*exec)){ MatMul_kernel(taskArgs[warpPool[warpIdxx].taskId].A, taskArgs[warpPool[warpIdxx].taskId].B, (int*)taskArgs[warpPool[warpIdxx].taskId].C, taskArgs[warpPool[warpIdxx].taskId].size, warpPool[warpIdxx].baseId); if((threadIdx.x & 0x1f) == 0){ if((atomicSub((int*)&taskArgs[warpPool[warpIdxx].taskId].doneGPU,1)) ==1){ taskArgs[warpPool[warpIdxx].taskId].doneHost = 0; // printf("Execution:%d, %d\n", warpIdxx, warpPool[warpIdxx].taskId); atomicAdd((int*)&totalExecTasks[blockIdx.x],1); //atomicAdd((int*)&totalScheTasks[0],1); } warpPool[warpIdxx].warpId = 0; __threadfence_block(); } } } #endif }// End else #endif }
1df660fa0aed73f5cb984090f7f09701f5ff70d7.cu
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include "para.h" struct kernel_para{ int *A, *B; volatile int *C; volatile int size; volatile int block; volatile int thread; volatile int warp; volatile int req; volatile int ready; volatile int funcId; volatile int taskId; volatile int doneHost; int doneGPU; }; struct kernel_para_GPU{ int warpId; int baseId; int taskId; }; __device__ void init_queue(struct kernel_para_GPU *warpPool){ int warpIdxx = (blockIdx.x*blockDim.x+threadIdx.x)/32; if((threadIdx.x) != 0){ warpPool[warpIdxx+threadIdx.x].warpId = 0; }else{ warpPool[warpIdxx+threadIdx.x].warpId = 1; } } __device__ void MatMul_kernel(int *A, int *B, int *C, int Size, int baseTid){ #if 1 int row = baseTid + (threadIdx.x & 0x1f); for (int j = 0; j < Size; j++){ int sum = 0; for (int k = 0; k < Size; k++){ int a = A[row * Size + k]; int b = B[k * Size + j]; sum += a * b; } C[row * Size + j] = sum; } #endif } __device__ void VecAdd_kernel(int *A, int *B, int *C, int size, int baseTid) { int i = baseTid + (threadIdx.x & 0x1f); //printf("In vec add with tid %d from block %d\n",i, blockIdx.x); // for(int j=0; j<200000; j++) if (i < size) C[i] = A[i] + B[i]; } __global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec, volatile int *totalScheTasks){ int warpIdxx = (blockIdx.x*blockDim.x + threadIdx.x)/32; int warp; int taskbufIter; int base; int taskbufId; int queuebufIter; int queuebufId; // Init warp queue contents and pointers #if 1 if(threadIdx.x < QSize){ init_queue(warpPool); warp = 0; taskbufIter = 0; queuebufIter = 0; base = 0; } __syncthreads(); #endif // scheduling in master warps if(threadIdx.x < 32) { if(threadIdx.x != 0 && threadIdx.x < (SBuf)){ while(!(*done)){ if(warp > 0){ if(warpPool[queuebufId].warpId == 0){ warpPool[queuebufId].taskId = taskBuffer[taskbufId].taskId; warpPool[queuebufId].baseId = base*32; 
warpPool[queuebufId].warpId = 1; warp--; base++; __threadfence_block(); if(warp == 0){ taskBuffer[taskbufId].req = 0; base = 0; } }// End if (warpQ->contents) }else{ taskbufId = (blockIdx.x*SBuf+threadIdx.x)+(taskbufIter*BSize*SBuf); queuebufId = (blockIdx.x*SBuf+threadIdx.x)+(queuebufIter*BSize*SBuf); taskbufIter++; queuebufIter++; if(taskbufIter == SRun) taskbufIter = 0; if(queuebufIter == QRun) queuebufIter = 0; if(taskBuffer[taskbufId].ready == 1 && !(*done)){ taskBuffer[taskbufId].ready = 0; warp = taskBuffer[taskbufId].warp; } } // end if warp > 0 }// End while done }// End if(threadIdx.x< QSize) }//End if(threadIdx.x < 32) #if 1 else{ #if 1 while(!(*exec)){ if(*exec) return; if(warpPool[warpIdxx].warpId == 1 && !(*exec)){ MatMul_kernel(taskArgs[warpPool[warpIdxx].taskId].A, taskArgs[warpPool[warpIdxx].taskId].B, (int*)taskArgs[warpPool[warpIdxx].taskId].C, taskArgs[warpPool[warpIdxx].taskId].size, warpPool[warpIdxx].baseId); if((threadIdx.x & 0x1f) == 0){ if((atomicSub((int*)&taskArgs[warpPool[warpIdxx].taskId].doneGPU,1)) ==1){ taskArgs[warpPool[warpIdxx].taskId].doneHost = 0; // printf("Execution:%d, %d\n", warpIdxx, warpPool[warpIdxx].taskId); atomicAdd((int*)&totalExecTasks[blockIdx.x],1); //atomicAdd((int*)&totalScheTasks[0],1); } warpPool[warpIdxx].warpId = 0; __threadfence_block(); } } } #endif }// End else #endif }
197a7f54cfdf45850420bf4e5f24d5d09af18258.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CUDA Implementation for farthest point sampling. Note: AT_ASSERT has become AT_CHECK on master after 0.4. AT_CHECK has become TORCH_CHEC K on master after 1.2. CHECK_EQ, CHECK_GT, etc. are marcos in Pytorch (include ATen.h). Tensor.type() is deprecated and instead use Tensor.options() after 1.5. Tensor.data() is deprecated and instead use Tensor.data_ptr() after 1.5. */ #ifndef _FPS_KERNEL #define _FPS_KERNEL #include <cmath> #include <cstdio> #include <ATen/ATen.h> #include <THH/THH.h> #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) // #define CHECK_EQ(x, y) TORCH_CHECK(x == y, #x " does not equal to " #y) // #define CHECK_GT(x, y) TORCH_CHECK(x > y, #x " is not greater than " #y) #define MAX_THREADS 512 inline int opt_n_threads(int work_size) { const int pow_2 = ::log(static_cast<double>(work_size)) / ::log(2.0); return max(min(1 << pow_2, MAX_THREADS), 1); } #define RUN(BLOCK_SIZE, DIM) \ AT_DISPATCH_FLOATING_TYPES(points.scalar_type(), "FarthestPointSample", ([&] { \ hipLaunchKernelGGL(( FarthestPointSampleKernel<BLOCK_SIZE, DIM, scalar_t, int64_t>) \ , dim3(batch_size), dim3(BLOCK_SIZE), 0, 0, \ index.data_ptr<int64_t>(), \ points.data_ptr<scalar_t>(), \ temp.data_ptr<scalar_t>(), \ num_points, \ num_samples); \ })); #define RUN_DIM(BLOCK_SIZE) \ switch (dim) { \ case 3: \ RUN(BLOCK_SIZE, 3) \ break; \ case 2: \ RUN(BLOCK_SIZE, 2) \ break; \ default: \ printf("Only support dim=2 or 3, but received %ld.\n", dim); \ } #define RUN_BLOCK(BLOCK_SIZE) \ case BLOCK_SIZE: \ RUN_DIM(BLOCK_SIZE) \ break; /* Forward kernel points: (B, N1, D) temp: (B, N1) index: (B, N2) */ template <unsigned int BLOCK_SIZE, unsigned int DIM, typename scalar_t, typename index_t> __global__ void FarthestPointSampleKernel( index_t* 
__restrict__ index, const scalar_t* __restrict__ points, scalar_t* __restrict__ temp, const int64_t num_points, const int64_t num_samples) { // Allocate shared memory __shared__ scalar_t smem_dist[BLOCK_SIZE]; // Use int to save memory __shared__ int smem_idx[BLOCK_SIZE]; const int batch_idx = blockIdx.x; int cur_idx = 0; int points_offset = batch_idx * num_points * DIM; int temp_offset = batch_idx * num_points; int index_offset = batch_idx * num_samples; // Explicitly choose the first point as a centroid if (threadIdx.x == 0) index[index_offset] = cur_idx; for (int i = 1; i < num_samples; ++i) { scalar_t max_dist = 0.0; int max_idx = cur_idx; int offset1 = cur_idx * DIM; scalar_t coords1[DIM] = {0.0}; #pragma unroll for (int ii = 0; ii < DIM; ++ii) { coords1[ii] = points[points_offset + offset1 + ii]; } for (int j = threadIdx.x; j < num_points; j += BLOCK_SIZE) { int offset2 = j * DIM; scalar_t dist = 0.0; #pragma unroll for (int jj = 0; jj < DIM; ++jj) { scalar_t diff = points[points_offset + offset2 + jj] - coords1[jj]; dist += diff * diff; } scalar_t last_dist = temp[temp_offset + j]; if (last_dist > dist || last_dist < 0.0) { temp[temp_offset + j] = dist; } else { dist = last_dist; } if (dist > max_dist) { max_dist = dist; max_idx = j; } } smem_dist[threadIdx.x] = max_dist; smem_idx[threadIdx.x] = max_idx; // assert block_size == blockDim.x int offset = BLOCK_SIZE / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) { scalar_t dist1 = smem_dist[threadIdx.x]; scalar_t dist2 = smem_dist[threadIdx.x+offset]; if (dist1 < dist2) { smem_dist[threadIdx.x] = dist2; smem_idx[threadIdx.x] = smem_idx[threadIdx.x+offset]; } } offset /= 2; } __syncthreads(); cur_idx = smem_idx[0]; if (threadIdx.x == 0) index[index_offset + i] = (index_t)cur_idx; } } /* Forward interface Input: points: (B, N1, D) Output: index: (B, N2) */ at::Tensor FarthestPointSample( const at::Tensor points, const int64_t num_samples) { // Sanity check CHECK_INPUT(points); 
CHECK_EQ(points.dim(), 3); TORCH_CHECK(points.size(2) == 2 || points.size(2) == 3, "Only support dim=2 or dim=3."); CHECK_GT(num_samples, 0); CHECK_GE(points.size(1), num_samples); const auto batch_size = points.size(0); const auto num_points = points.size(1); const auto dim = points.size(2); auto index = at::zeros({batch_size, num_samples}, points.options().dtype(at::kLong)); // In original implementation, it only allocates memory with the size of grid instead of batch size. auto temp = at::neg(at::ones({batch_size, num_points}, points.options())); // In order to make full use of shared memory and threads, // it is recommended to set num_samples to be power of 2. const auto n_threads = opt_n_threads(num_points); switch (n_threads) { RUN_BLOCK(512) RUN_BLOCK(256) RUN_BLOCK(128) RUN_BLOCK(64) RUN_BLOCK(32) RUN_BLOCK(16) default: RUN_DIM(16) } THCudaCheck(hipGetLastError()); return index; } #endif
197a7f54cfdf45850420bf4e5f24d5d09af18258.cu
/* CUDA Implementation for farthest point sampling. Note: AT_ASSERT has become AT_CHECK on master after 0.4. AT_CHECK has become TORCH_CHEC K on master after 1.2. CHECK_EQ, CHECK_GT, etc. are marcos in Pytorch (include ATen.h). Tensor.type() is deprecated and instead use Tensor.options() after 1.5. Tensor.data() is deprecated and instead use Tensor.data_ptr() after 1.5. */ #ifndef _FPS_KERNEL #define _FPS_KERNEL #include <cmath> #include <cstdio> #include <ATen/ATen.h> #include <THC/THC.h> #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) // #define CHECK_EQ(x, y) TORCH_CHECK(x == y, #x " does not equal to " #y) // #define CHECK_GT(x, y) TORCH_CHECK(x > y, #x " is not greater than " #y) #define MAX_THREADS 512 inline int opt_n_threads(int work_size) { const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0); return max(min(1 << pow_2, MAX_THREADS), 1); } #define RUN(BLOCK_SIZE, DIM) \ AT_DISPATCH_FLOATING_TYPES(points.scalar_type(), "FarthestPointSample", ([&] { \ FarthestPointSampleKernel<BLOCK_SIZE, DIM, scalar_t, int64_t> \ <<<batch_size, BLOCK_SIZE>>>( \ index.data_ptr<int64_t>(), \ points.data_ptr<scalar_t>(), \ temp.data_ptr<scalar_t>(), \ num_points, \ num_samples); \ })); #define RUN_DIM(BLOCK_SIZE) \ switch (dim) { \ case 3: \ RUN(BLOCK_SIZE, 3) \ break; \ case 2: \ RUN(BLOCK_SIZE, 2) \ break; \ default: \ printf("Only support dim=2 or 3, but received %ld.\n", dim); \ } #define RUN_BLOCK(BLOCK_SIZE) \ case BLOCK_SIZE: \ RUN_DIM(BLOCK_SIZE) \ break; /* Forward kernel points: (B, N1, D) temp: (B, N1) index: (B, N2) */ template <unsigned int BLOCK_SIZE, unsigned int DIM, typename scalar_t, typename index_t> __global__ void FarthestPointSampleKernel( index_t* __restrict__ index, const scalar_t* __restrict__ points, scalar_t* __restrict__ temp, const int64_t num_points, const 
int64_t num_samples) { // Allocate shared memory __shared__ scalar_t smem_dist[BLOCK_SIZE]; // Use int to save memory __shared__ int smem_idx[BLOCK_SIZE]; const int batch_idx = blockIdx.x; int cur_idx = 0; int points_offset = batch_idx * num_points * DIM; int temp_offset = batch_idx * num_points; int index_offset = batch_idx * num_samples; // Explicitly choose the first point as a centroid if (threadIdx.x == 0) index[index_offset] = cur_idx; for (int i = 1; i < num_samples; ++i) { scalar_t max_dist = 0.0; int max_idx = cur_idx; int offset1 = cur_idx * DIM; scalar_t coords1[DIM] = {0.0}; #pragma unroll for (int ii = 0; ii < DIM; ++ii) { coords1[ii] = points[points_offset + offset1 + ii]; } for (int j = threadIdx.x; j < num_points; j += BLOCK_SIZE) { int offset2 = j * DIM; scalar_t dist = 0.0; #pragma unroll for (int jj = 0; jj < DIM; ++jj) { scalar_t diff = points[points_offset + offset2 + jj] - coords1[jj]; dist += diff * diff; } scalar_t last_dist = temp[temp_offset + j]; if (last_dist > dist || last_dist < 0.0) { temp[temp_offset + j] = dist; } else { dist = last_dist; } if (dist > max_dist) { max_dist = dist; max_idx = j; } } smem_dist[threadIdx.x] = max_dist; smem_idx[threadIdx.x] = max_idx; // assert block_size == blockDim.x int offset = BLOCK_SIZE / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) { scalar_t dist1 = smem_dist[threadIdx.x]; scalar_t dist2 = smem_dist[threadIdx.x+offset]; if (dist1 < dist2) { smem_dist[threadIdx.x] = dist2; smem_idx[threadIdx.x] = smem_idx[threadIdx.x+offset]; } } offset /= 2; } __syncthreads(); cur_idx = smem_idx[0]; if (threadIdx.x == 0) index[index_offset + i] = (index_t)cur_idx; } } /* Forward interface Input: points: (B, N1, D) Output: index: (B, N2) */ at::Tensor FarthestPointSample( const at::Tensor points, const int64_t num_samples) { // Sanity check CHECK_INPUT(points); CHECK_EQ(points.dim(), 3); TORCH_CHECK(points.size(2) == 2 || points.size(2) == 3, "Only support dim=2 or dim=3."); 
CHECK_GT(num_samples, 0); CHECK_GE(points.size(1), num_samples); const auto batch_size = points.size(0); const auto num_points = points.size(1); const auto dim = points.size(2); auto index = at::zeros({batch_size, num_samples}, points.options().dtype(at::kLong)); // In original implementation, it only allocates memory with the size of grid instead of batch size. auto temp = at::neg(at::ones({batch_size, num_points}, points.options())); // In order to make full use of shared memory and threads, // it is recommended to set num_samples to be power of 2. const auto n_threads = opt_n_threads(num_points); switch (n_threads) { RUN_BLOCK(512) RUN_BLOCK(256) RUN_BLOCK(128) RUN_BLOCK(64) RUN_BLOCK(32) RUN_BLOCK(16) default: RUN_DIM(16) } THCudaCheck(cudaGetLastError()); return index; } #endif
f23677b4eff8c2582c5fc5daece4bd536cf3e836.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" #define blockSize 256 #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) int* dev_data; int* dev_oData; int* dev_scanData; int* dev_boolData; namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kern_UpSweep(int n, int* arr, int pow) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } if (index % (2 * pow) == 0) { arr[index + 2 * pow - 1] += arr[index + pow - 1]; } } __global__ void kern_SetRoot(int n, int* arr) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } arr[n - 1] = 0; } __global__ void kern_DownSweep(int n, int* arr, int pow) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } if (index % (2 * pow) == 0) { int temp = arr[index + pow - 1]; arr[index + pow - 1] = arr[index + 2 * pow - 1]; arr[index + 2 * pow - 1] += temp; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int* odata, const int* idata) { int blocks = ceil((float)n / (float)blockSize); int logN = ilog2ceil(n); const int len = (int)powf(2, logN); hipMalloc((void**)&dev_data, sizeof(int) * (int)powf(2, logN)); hipMemcpy(dev_data, idata, sizeof(int) * n, hipMemcpyHostToDevice); timer().startGpuTimer(); for (int d = 0; d <= logN - 1; d++) { kern_UpSweep << <blocks, blockSize >> > (len, dev_data, (int)powf(2, d)); } kern_SetRoot << <1, 1 >> > (len, dev_data); for (int d = logN - 1; d >= 0; d--) { kern_DownSweep << <blocks, blockSize >> > (len, dev_data, (int)powf(2, d)); } timer().endGpuTimer(); hipMemcpy(odata, dev_data, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(dev_data); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { int numBlocks = ceil((float)n / (float)blockSize); int logN = ilog2ceil(n); const int len = (int)powf(2, logN); hipMalloc((void**)&dev_data, sizeof(int) * len); hipMalloc((void**)&dev_boolData, sizeof(int) * len); hipMalloc((void**)&dev_oData, sizeof(int) * n); hipMemcpy(dev_data, idata, sizeof(int) * n, hipMemcpyHostToDevice); timer().startGpuTimer(); // TODO -> DONE StreamCompaction::Common::kernMapToBoolean << <numBlocks, blockSize >> > (len, dev_boolData, dev_data); for (int d = 0; d <= logN - 1; d++) { kern_UpSweep << <numBlocks, blockSize >> > (len, dev_boolData, (int)powf(2, d)); } kern_SetRoot << <1, 1 >> > (len, dev_boolData); for (int d = logN - 1; d >= 0; d--) { kern_DownSweep << <numBlocks, blockSize >> > (len, dev_boolData, (int)powf(2, d)); } StreamCompaction::Common::kernScatter << <numBlocks, blockSize >> > (n, dev_oData, dev_data, dev_boolData, nullptr); timer().endGpuTimer(); int* finalBoolArr = new int[n]; hipMemcpy(odata, dev_oData, sizeof(int) * n, hipMemcpyDeviceToHost); hipMemcpy(finalBoolArr, dev_boolData, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(dev_data); hipFree(dev_boolData); hipFree(dev_oData); if (idata[n - 1] == 0) { return finalBoolArr[n - 1]; } return finalBoolArr[n - 1] + 1; } } }
f23677b4eff8c2582c5fc5daece4bd536cf3e836.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" #define blockSize 256 #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) int* dev_data; int* dev_oData; int* dev_scanData; int* dev_boolData; namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kern_UpSweep(int n, int* arr, int pow) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } if (index % (2 * pow) == 0) { arr[index + 2 * pow - 1] += arr[index + pow - 1]; } } __global__ void kern_SetRoot(int n, int* arr) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } arr[n - 1] = 0; } __global__ void kern_DownSweep(int n, int* arr, int pow) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } if (index % (2 * pow) == 0) { int temp = arr[index + pow - 1]; arr[index + pow - 1] = arr[index + 2 * pow - 1]; arr[index + 2 * pow - 1] += temp; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int* odata, const int* idata) { int blocks = ceil((float)n / (float)blockSize); int logN = ilog2ceil(n); const int len = (int)powf(2, logN); cudaMalloc((void**)&dev_data, sizeof(int) * (int)powf(2, logN)); cudaMemcpy(dev_data, idata, sizeof(int) * n, cudaMemcpyHostToDevice); timer().startGpuTimer(); for (int d = 0; d <= logN - 1; d++) { kern_UpSweep << <blocks, blockSize >> > (len, dev_data, (int)powf(2, d)); } kern_SetRoot << <1, 1 >> > (len, dev_data); for (int d = logN - 1; d >= 0; d--) { kern_DownSweep << <blocks, blockSize >> > (len, dev_data, (int)powf(2, d)); } timer().endGpuTimer(); cudaMemcpy(odata, dev_data, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaFree(dev_data); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. 
* * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { int numBlocks = ceil((float)n / (float)blockSize); int logN = ilog2ceil(n); const int len = (int)powf(2, logN); cudaMalloc((void**)&dev_data, sizeof(int) * len); cudaMalloc((void**)&dev_boolData, sizeof(int) * len); cudaMalloc((void**)&dev_oData, sizeof(int) * n); cudaMemcpy(dev_data, idata, sizeof(int) * n, cudaMemcpyHostToDevice); timer().startGpuTimer(); // TODO -> DONE StreamCompaction::Common::kernMapToBoolean << <numBlocks, blockSize >> > (len, dev_boolData, dev_data); for (int d = 0; d <= logN - 1; d++) { kern_UpSweep << <numBlocks, blockSize >> > (len, dev_boolData, (int)powf(2, d)); } kern_SetRoot << <1, 1 >> > (len, dev_boolData); for (int d = logN - 1; d >= 0; d--) { kern_DownSweep << <numBlocks, blockSize >> > (len, dev_boolData, (int)powf(2, d)); } StreamCompaction::Common::kernScatter << <numBlocks, blockSize >> > (n, dev_oData, dev_data, dev_boolData, nullptr); timer().endGpuTimer(); int* finalBoolArr = new int[n]; cudaMemcpy(odata, dev_oData, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaMemcpy(finalBoolArr, dev_boolData, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaFree(dev_data); cudaFree(dev_boolData); cudaFree(dev_oData); if (idata[n - 1] == 0) { return finalBoolArr[n - 1]; } return finalBoolArr[n - 1] + 1; } } }
bde455d1820be2eb481b6dd84a5ed6bd2cc4c0a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" __global__ void prod_strided_double(int n, int xOffset,double *dx,int incx,double result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= xOffset && i % incx == 0) result *= dx[i]; } }
bde455d1820be2eb481b6dd84a5ed6bd2cc4c0a1.cu
#include "includes.h" extern "C" __global__ void prod_strided_double(int n, int xOffset,double *dx,int incx,double result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= xOffset && i % incx == 0) result *= dx[i]; } }
57177da8d1472d9ac38daf67d70ad91cca2a9a8d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> /* * Refactor `loop` to be a CUDA Kernel. The new kernel should * only do the work of 1 iteration of the original loop. */ void loop(int N) { for (int i = 0; i < N; ++i) { printf("This is iteration number %d\n", i); } } int main() { /* * When refactoring `loop` to launch as a kernel, be sure * to use the execution configuration to control how many * "iterations" to perform. * * For this exercise, be sure to use more than 1 block in * the execution configuration. */ int N = 10; loop(N); }
57177da8d1472d9ac38daf67d70ad91cca2a9a8d.cu
#include <stdio.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> /* * Refactor `loop` to be a CUDA Kernel. The new kernel should * only do the work of 1 iteration of the original loop. */ void loop(int N) { for (int i = 0; i < N; ++i) { printf("This is iteration number %d\n", i); } } int main() { /* * When refactoring `loop` to launch as a kernel, be sure * to use the execution configuration to control how many * "iterations" to perform. * * For this exercise, be sure to use more than 1 block in * the execution configuration. */ int N = 10; loop(N); }
8c26ad6f9ee19ea3963567e56b13046700c352a5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_fp16.h> __device__ inline void MyAtomicAdd(float* address, float value) { int oldval, newval, readback; oldval = __float_as_int(*address); newval = __float_as_int(__int_as_float(oldval) + value); while ((readback = atomicCAS((int*)address, oldval, newval)) != oldval) { oldval = readback; newval = __float_as_int(__int_as_float(oldval) + value); } }
8c26ad6f9ee19ea3963567e56b13046700c352a5.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda_fp16.h> __device__ inline void MyAtomicAdd(float* address, float value) { int oldval, newval, readback; oldval = __float_as_int(*address); newval = __float_as_int(__int_as_float(oldval) + value); while ((readback = atomicCAS((int*)address, oldval, newval)) != oldval) { oldval = readback; newval = __float_as_int(__int_as_float(oldval) + value); } }
da4f6e483b09b140f7fdedfa170400462ad64316.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include <vector> #ifndef __HIPCC__ #define __HIPCC__ #endif #include "CudaType.h" #define SHAREDSIZE ((16 * 1024)/sizeof(DTYPE)) extern __device__ void __syncthreads(); __global__ void kernelBuffer_propagation(buffer * buffers) { int id_buf = blockIdx.x; int id_coord = threadIdx.x; buffers[id_buf].propagation_valid[id_coord] = buffers[id_buf].propagation_calculated[id_coord]; } __device__ void propagation_perceptron(buffer * input, buffer * output, PCPTR * perceptron) { int id = threadIdx.x; if (id < input->size) output->propagation_calculated[id] = ((DTYPE)1) / (1 + exp(-input->propagation_valid[id] - perceptron->bias[id])); } __device__ void propagation_fullconnection(buffer * input, buffer * output, FC * fconnection) { int id = threadIdx.x; __shared__ DTYPE input_buffer[SHAREDSIZE]; int size_output = output->size; int size_input = input->size; if (id < size_input) { input_buffer[id] = input->propagation_valid[id]; __syncthreads(); DTYPE resultat = input_buffer[0] * fconnection->matrix[id * size_output]; for (int i = 1; i < size_input; i++) resultat += input_buffer[i] * fconnection->matrix[id * size_output + i]; output->propagation_calculated[id] = resultat; } } __global__ void kernelNetwork_propagation(buffer * buffers, node * nodes, FC * fc, PCPTR * pcptr) { int layer = blockIdx.x; buffer * input = buffers + layer; buffer * output = input + 1; switch (nodes[layer].type) { case PERCEPTRON: propagation_perceptron(input, output, pcptr + nodes[layer].indice); break; case FCONNECTION: propagation_fullconnection(input, output, fc + nodes[layer].indice); break; } } void propagation_kernelcall(int node_number, int maxdim, buffer * GPUbuffer_tab, node *GPUnode_tab, FC *GPUFC_tab, PCPTR *GPUPCPTR_tab) { hipLaunchKernelGGL(( kernelNetwork_propagation), dim3(node_number), dim3(maxdim), 0, 0, GPUbuffer_tab, 
GPUnode_tab, GPUFC_tab, GPUPCPTR_tab); hipLaunchKernelGGL(( kernelBuffer_propagation), dim3(node_number + 1), dim3(maxdim), 0, 0, GPUbuffer_tab); } __device__ void fullconnection_propagationNoPipeline(buffer * input, buffer * output, FC * fc) { int id = threadIdx.x; uint32_t input_size = input->size; uint32_t output_size = output->size; __shared__ DTYPE input_buffer[SHAREDSIZE]; if (id < input_size) input_buffer[id % input_size] = input->propagation_valid[id % input_size]; __syncthreads(); if (id < output_size) { DTYPE resultat = input_buffer[0] * fc->matrix[id]; for (uint32_t i = 1; i < input_size; i++) { resultat += input_buffer[i] * fc->matrix[id + output_size * i]; } output->propagation_valid[id] = resultat; } } __device__ void perceptron_propagationNoPipeline(buffer * input, buffer * output, PCPTR * pcptr) { int id = threadIdx.x ; if (id < input->size) output->propagation_valid[id] = ((DTYPE)1) / (((DTYPE)1) + ((DTYPE)__expf(-input->propagation_valid[id] - pcptr->bias[id]))); } __device__ void propagationNoPipeline(buffer * input, buffer * output, node * node, FC * fc, PCPTR * pcptr) { switch (node->type) { case PERCEPTRON: perceptron_propagationNoPipeline(input, output, &(pcptr[node->indice])); break; case FCONNECTION: fullconnection_propagationNoPipeline(input, output, &(fc[node->indice])); break; } } __device__ void backpropagation_perceptron(buffer * input, buffer * output, PCPTR * node) { int id = threadIdx.x; if (id < input->size) { output->back_propagation[id] = input->back_propagation[id] * input->propagation_valid[id] * ((DTYPE)1 - input->propagation_valid[id]); } } __device__ void backpropagation_fullconnection(buffer * input, buffer * output, FC * node) { int id = threadIdx.x; __shared__ DTYPE input_buffer[SHAREDSIZE]; int size_output = output->size; int size_input = input->size; if (id < size_input) input_buffer[id] = input->back_propagation[id]; __syncthreads(); if (id < size_output) { DTYPE resultat = input_buffer[0] * node->matrix[id * 
size_input]; for (int i = 1; i < size_input; i++) resultat += input_buffer[i] * node->matrix[id * size_input + i]; output->back_propagation[id] = resultat; } } __device__ void backPropagation(buffer * input, buffer * output, node * node, FC * fc, PCPTR * pcptr, int node_size) { switch (node->type) { case PERCEPTRON: backpropagation_perceptron(input, output, pcptr + node->indice); break; case FCONNECTION: backpropagation_fullconnection(input, output, fc + node->indice); break; } } __device__ void gradient(buffer * resultat, DTYPE * expected) { int id = threadIdx.x; if (id < resultat->size) { resultat->back_propagation[id] = resultat->propagation_valid[id] - expected[id]; } } __global__ void gradient_kernel(buffer * resultat, DTYPE * expected) { int id = threadIdx.x; if (id < resultat->size) { resultat->back_propagation[id] = resultat->propagation_valid[id] - expected[id]; } } __device__ void learn_perceptron(buffer * input, buffer * output, PCPTR * pcptr, DTYPE alpha) { int id = threadIdx.x; if (id < input->size) pcptr->bias[id] -= alpha * input->back_propagation[id]; } __device__ void learn_fullconnection(buffer * input, buffer * output, FC * fc, DTYPE alpha) { int input_size = input->size; int output_size = output->size; int id = threadIdx.x ; __shared__ DTYPE input_prop[SHAREDSIZE / 2]; __shared__ DTYPE output_backprop[SHAREDSIZE / 2]; if (id < output_size) output_backprop[id] = output->back_propagation[id]; if (id < input_size) input_prop[id % input_size] = input->propagation_valid[id % input_size]; __syncthreads(); if (id < output_size) for (int i = 0; i < input_size; i++) fc->matrix[id + output_size * i] -= alpha * input_prop[i] * output_backprop[id]; } __device__ void learn(buffer * input, buffer * output, node * node, FC * fc, PCPTR * pcptr, DTYPE alpha) { switch (node->type) { case PERCEPTRON: learn_perceptron(input, output, pcptr + node->indice, alpha); break; case FCONNECTION: learn_fullconnection(input, output, fc + node->indice, alpha); break; } } 
__global__ void kernelNetwork_learningNoPipeline(buffer buffers[], node nodes[], FC fc[], PCPTR pcptr[], int node_size, float * expected, DTYPE alpha) { //propagation for (uint32_t i = 0; i < node_size; i++) { propagationNoPipeline(&(buffers[i]), &(buffers[i + 1]), &(nodes[i]), fc, pcptr); __syncthreads(); } //gradient gradient(buffers + node_size, expected); __syncthreads(); //backpropagation for (int32_t i = node_size - 1; i >= 0; i--) { backPropagation(buffers + i + 1, buffers + i, nodes + i, fc, pcptr, node_size); __syncthreads(); } //apprentissage for (uint32_t i = 0; i < node_size; i++) { learn(buffers + i, buffers + i + 1, nodes + i, fc, pcptr, alpha); __syncthreads(); } } __global__ void kernelNetwork_propagationNoPipeline(buffer * buffers, node *nodes, FC *fc, PCPTR *pcptr, int node_size) { for (uint32_t i = 0; i < node_size; i++) { propagationNoPipeline(buffers + i, buffers + i + 1, nodes + i, fc, pcptr); __syncthreads(); } } __global__ void kernelPerceptronLayer_propagation(buffer *input, buffer *output, PCPTR *node) { int id = threadIdx.x; output->propagation_valid[id] = ((DTYPE)1) / (((DTYPE)1) + ((DTYPE)__expf(-input->propagation_valid[id] - node->bias[id]))); } __global__ void kernelFullConnection_propagation(buffer *input, buffer *output, FC *node) { int id = threadIdx.x; uint32_t input_size = input->size; uint32_t output_size = output->size; //__shared__ DTYPE input_buffer[SHAREDSIZE]; //input_buffer[id] = input->propagation_valid[id]; __syncthreads(); /*A paraleliser*/ DTYPE resultat = input->propagation_valid[0] * node->matrix[id]; for (uint32_t i = 1; i < input_size; i++) { resultat += input->propagation_valid[i] * node->matrix[id + output_size * i]; } output->propagation_valid[id] = resultat; } __global__ void kernelPerceptronLayer_backpropagation(buffer *input, buffer *output, PCPTR *node) { int id = threadIdx.x; output->back_propagation[id] = input->back_propagation[id] * input->propagation_valid[id] * ((DTYPE)1 - 
input->propagation_valid[id]); } __global__ void kernelFullConnection_backpropagation(buffer *input, buffer *output, FC *node) { int id = threadIdx.x; //__shared__ DTYPE input_buffer[SHAREDSIZE]; //int size_output = output->size; int size_input = input->size; //int id_output = id % size_output; //if (id < size_input) // input_buffer[id % size_input] = input->back_propagation[id % size_input]; __syncthreads(); //if (id < size_output) { DTYPE resultat = input->back_propagation[0] * node->matrix[id * size_input]; for (int i = 1; i < size_input; i++) resultat += input->back_propagation[i] * node->matrix[id * size_input + i]; output->back_propagation[id] = resultat; } __global__ void kernelPerceptronLayer_learning(buffer * input, buffer * output, PCPTR * pcptr, DTYPE alpha) { int id = threadIdx.x; pcptr->bias[id] -= alpha * input->back_propagation[id]; } __global__ void kernelFullConnection_learning(buffer * input, buffer * output, FC * fc, DTYPE alpha) { int input_size = input->size; int output_size = output->size; int id = threadIdx.x; /* __shared__ DTYPE input_prop[SHAREDSIZE / 2]; __shared__ DTYPE output_backprop[SHAREDSIZE / 2]; if (id < output_size) output_backprop[id] = output->back_propagation[id]; if (id < input_size) input_prop[id % input_size] = input->propagation_valid[id % input_size]; __syncthreads(); if (id < output_size)*/ for (int i = 0; i < input_size; i++) fc->matrix[id + output_size * i] -= alpha * input->propagation_valid[i] * output->back_propagation[id]; } void propagationNoPipeline_kernelcall(int node_number, int maxdim, buffer * GPUbuffer_tab, node *GPUnode_tab, FC *GPUFC_tab, PCPTR *GPUPCPTR_tab) { kernelNetwork_propagationNoPipeline << <1, maxdim >> >(GPUbuffer_tab, GPUnode_tab, GPUFC_tab, GPUPCPTR_tab, node_number); } void learningNoPipeline_kernelcall(buffer * buffers, node * nodes, FC * fc, PCPTR * pcptr, int node_size, float * expected, DTYPE alpha, int maxdim) { hipLaunchKernelGGL(( kernelNetwork_learningNoPipeline) , dim3(1), 
dim3(maxdim), 0, 0, buffers, nodes, fc, pcptr, node_size, expected, alpha); } /* il faut que buffer soit decroissant */ void propagationNoPipeline_AtomicKernelcall( buffer * buffers, FC * fc, PCPTR * pcptr, std::vector<type_node> &node_type, std::vector<uint32_t> &index, std::vector<uint32_t> &buffer_size, int node_number) { for (int i = 0; i < node_number; i++) { switch (node_type[i]) { case FCONNECTION: //printf("kernel call : FC\n"); kernelFullConnection_propagation <<<1, buffer_size[i + 1] >> >(buffers + i, buffers + i + 1, fc + index[i]); break; case PERCEPTRON: //printf("kernel call : PERCEP\n"); kernelPerceptronLayer_propagation <<<1, buffer_size[i + 1] >> >(buffers + i, buffers + i + 1, pcptr + index[i]); break; } } } void learningNoPipeline_AtomicKernelcall( buffer * buffers, FC * fc, PCPTR * pcptr, std::vector<type_node> &node_type, std::vector<uint32_t> &index, std::vector<uint32_t> &buffer_size, int node_number, DTYPE * expected, DTYPE alpha) { propagationNoPipeline_AtomicKernelcall(buffers, fc, pcptr, node_type, index, buffer_size, node_number); gradient_kernel << <1, buffer_size[node_number] >> >(buffers + node_number, expected); for (int i = node_number - 1; i >= 0; i--) { switch (node_type[i]) { case FCONNECTION: //printf("kernel call : FC\n"); kernelFullConnection_backpropagation << <1, buffer_size[i] >> >(buffers + i + 1, buffers + i, fc + index[i]); break; case PERCEPTRON: //printf("kernel call : PERCEP\n"); kernelPerceptronLayer_backpropagation << <1, buffer_size[i] >> >(buffers + i + 1, buffers + i, pcptr + index[i]); break; } } for (int i = 0; i < node_number; i++) { switch (node_type[i]) { case FCONNECTION: //printf("kernel call : FC\n"); kernelFullConnection_learning <<<1, buffer_size[i + 1] >> >(buffers + i + 1, buffers + i, fc + index[i], alpha); break; case PERCEPTRON: //printf("kernel call : PERCEP\n"); kernelPerceptronLayer_learning <<<1, buffer_size[i + 1] >> >(buffers + i + 1, buffers + i, pcptr + index[i], alpha); break; } } }
da4f6e483b09b140f7fdedfa170400462ad64316.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include <vector> #ifndef __CUDACC__ #define __CUDACC__ #endif #include "CudaType.h" #define SHAREDSIZE ((16 * 1024)/sizeof(DTYPE)) extern __device__ void __syncthreads(); __global__ void kernelBuffer_propagation(buffer * buffers) { int id_buf = blockIdx.x; int id_coord = threadIdx.x; buffers[id_buf].propagation_valid[id_coord] = buffers[id_buf].propagation_calculated[id_coord]; } __device__ void propagation_perceptron(buffer * input, buffer * output, PCPTR * perceptron) { int id = threadIdx.x; if (id < input->size) output->propagation_calculated[id] = ((DTYPE)1) / (1 + exp(-input->propagation_valid[id] - perceptron->bias[id])); } __device__ void propagation_fullconnection(buffer * input, buffer * output, FC * fconnection) { int id = threadIdx.x; __shared__ DTYPE input_buffer[SHAREDSIZE]; int size_output = output->size; int size_input = input->size; if (id < size_input) { input_buffer[id] = input->propagation_valid[id]; __syncthreads(); DTYPE resultat = input_buffer[0] * fconnection->matrix[id * size_output]; for (int i = 1; i < size_input; i++) resultat += input_buffer[i] * fconnection->matrix[id * size_output + i]; output->propagation_calculated[id] = resultat; } } __global__ void kernelNetwork_propagation(buffer * buffers, node * nodes, FC * fc, PCPTR * pcptr) { int layer = blockIdx.x; buffer * input = buffers + layer; buffer * output = input + 1; switch (nodes[layer].type) { case PERCEPTRON: propagation_perceptron(input, output, pcptr + nodes[layer].indice); break; case FCONNECTION: propagation_fullconnection(input, output, fc + nodes[layer].indice); break; } } void propagation_kernelcall(int node_number, int maxdim, buffer * GPUbuffer_tab, node *GPUnode_tab, FC *GPUFC_tab, PCPTR *GPUPCPTR_tab) { kernelNetwork_propagation<<<node_number, maxdim>>>(GPUbuffer_tab, GPUnode_tab, GPUFC_tab, GPUPCPTR_tab); kernelBuffer_propagation<<<node_number + 1, maxdim>>>(GPUbuffer_tab); 
} __device__ void fullconnection_propagationNoPipeline(buffer * input, buffer * output, FC * fc) { int id = threadIdx.x; uint32_t input_size = input->size; uint32_t output_size = output->size; __shared__ DTYPE input_buffer[SHAREDSIZE]; if (id < input_size) input_buffer[id % input_size] = input->propagation_valid[id % input_size]; __syncthreads(); if (id < output_size) { DTYPE resultat = input_buffer[0] * fc->matrix[id]; for (uint32_t i = 1; i < input_size; i++) { resultat += input_buffer[i] * fc->matrix[id + output_size * i]; } output->propagation_valid[id] = resultat; } } __device__ void perceptron_propagationNoPipeline(buffer * input, buffer * output, PCPTR * pcptr) { int id = threadIdx.x ; if (id < input->size) output->propagation_valid[id] = ((DTYPE)1) / (((DTYPE)1) + ((DTYPE)__expf(-input->propagation_valid[id] - pcptr->bias[id]))); } __device__ void propagationNoPipeline(buffer * input, buffer * output, node * node, FC * fc, PCPTR * pcptr) { switch (node->type) { case PERCEPTRON: perceptron_propagationNoPipeline(input, output, &(pcptr[node->indice])); break; case FCONNECTION: fullconnection_propagationNoPipeline(input, output, &(fc[node->indice])); break; } } __device__ void backpropagation_perceptron(buffer * input, buffer * output, PCPTR * node) { int id = threadIdx.x; if (id < input->size) { output->back_propagation[id] = input->back_propagation[id] * input->propagation_valid[id] * ((DTYPE)1 - input->propagation_valid[id]); } } __device__ void backpropagation_fullconnection(buffer * input, buffer * output, FC * node) { int id = threadIdx.x; __shared__ DTYPE input_buffer[SHAREDSIZE]; int size_output = output->size; int size_input = input->size; if (id < size_input) input_buffer[id] = input->back_propagation[id]; __syncthreads(); if (id < size_output) { DTYPE resultat = input_buffer[0] * node->matrix[id * size_input]; for (int i = 1; i < size_input; i++) resultat += input_buffer[i] * node->matrix[id * size_input + i]; output->back_propagation[id] = resultat; 
} } __device__ void backPropagation(buffer * input, buffer * output, node * node, FC * fc, PCPTR * pcptr, int node_size) { switch (node->type) { case PERCEPTRON: backpropagation_perceptron(input, output, pcptr + node->indice); break; case FCONNECTION: backpropagation_fullconnection(input, output, fc + node->indice); break; } } __device__ void gradient(buffer * resultat, DTYPE * expected) { int id = threadIdx.x; if (id < resultat->size) { resultat->back_propagation[id] = resultat->propagation_valid[id] - expected[id]; } } __global__ void gradient_kernel(buffer * resultat, DTYPE * expected) { int id = threadIdx.x; if (id < resultat->size) { resultat->back_propagation[id] = resultat->propagation_valid[id] - expected[id]; } } __device__ void learn_perceptron(buffer * input, buffer * output, PCPTR * pcptr, DTYPE alpha) { int id = threadIdx.x; if (id < input->size) pcptr->bias[id] -= alpha * input->back_propagation[id]; } __device__ void learn_fullconnection(buffer * input, buffer * output, FC * fc, DTYPE alpha) { int input_size = input->size; int output_size = output->size; int id = threadIdx.x ; __shared__ DTYPE input_prop[SHAREDSIZE / 2]; __shared__ DTYPE output_backprop[SHAREDSIZE / 2]; if (id < output_size) output_backprop[id] = output->back_propagation[id]; if (id < input_size) input_prop[id % input_size] = input->propagation_valid[id % input_size]; __syncthreads(); if (id < output_size) for (int i = 0; i < input_size; i++) fc->matrix[id + output_size * i] -= alpha * input_prop[i] * output_backprop[id]; } __device__ void learn(buffer * input, buffer * output, node * node, FC * fc, PCPTR * pcptr, DTYPE alpha) { switch (node->type) { case PERCEPTRON: learn_perceptron(input, output, pcptr + node->indice, alpha); break; case FCONNECTION: learn_fullconnection(input, output, fc + node->indice, alpha); break; } } __global__ void kernelNetwork_learningNoPipeline(buffer buffers[], node nodes[], FC fc[], PCPTR pcptr[], int node_size, float * expected, DTYPE alpha) { 
//propagation for (uint32_t i = 0; i < node_size; i++) { propagationNoPipeline(&(buffers[i]), &(buffers[i + 1]), &(nodes[i]), fc, pcptr); __syncthreads(); } //gradient gradient(buffers + node_size, expected); __syncthreads(); //backpropagation for (int32_t i = node_size - 1; i >= 0; i--) { backPropagation(buffers + i + 1, buffers + i, nodes + i, fc, pcptr, node_size); __syncthreads(); } //apprentissage for (uint32_t i = 0; i < node_size; i++) { learn(buffers + i, buffers + i + 1, nodes + i, fc, pcptr, alpha); __syncthreads(); } } __global__ void kernelNetwork_propagationNoPipeline(buffer * buffers, node *nodes, FC *fc, PCPTR *pcptr, int node_size) { for (uint32_t i = 0; i < node_size; i++) { propagationNoPipeline(buffers + i, buffers + i + 1, nodes + i, fc, pcptr); __syncthreads(); } } __global__ void kernelPerceptronLayer_propagation(buffer *input, buffer *output, PCPTR *node) { int id = threadIdx.x; output->propagation_valid[id] = ((DTYPE)1) / (((DTYPE)1) + ((DTYPE)__expf(-input->propagation_valid[id] - node->bias[id]))); } __global__ void kernelFullConnection_propagation(buffer *input, buffer *output, FC *node) { int id = threadIdx.x; uint32_t input_size = input->size; uint32_t output_size = output->size; //__shared__ DTYPE input_buffer[SHAREDSIZE]; //input_buffer[id] = input->propagation_valid[id]; __syncthreads(); /*A paraleliser*/ DTYPE resultat = input->propagation_valid[0] * node->matrix[id]; for (uint32_t i = 1; i < input_size; i++) { resultat += input->propagation_valid[i] * node->matrix[id + output_size * i]; } output->propagation_valid[id] = resultat; } __global__ void kernelPerceptronLayer_backpropagation(buffer *input, buffer *output, PCPTR *node) { int id = threadIdx.x; output->back_propagation[id] = input->back_propagation[id] * input->propagation_valid[id] * ((DTYPE)1 - input->propagation_valid[id]); } __global__ void kernelFullConnection_backpropagation(buffer *input, buffer *output, FC *node) { int id = threadIdx.x; //__shared__ DTYPE 
input_buffer[SHAREDSIZE]; //int size_output = output->size; int size_input = input->size; //int id_output = id % size_output; //if (id < size_input) // input_buffer[id % size_input] = input->back_propagation[id % size_input]; __syncthreads(); //if (id < size_output) { DTYPE resultat = input->back_propagation[0] * node->matrix[id * size_input]; for (int i = 1; i < size_input; i++) resultat += input->back_propagation[i] * node->matrix[id * size_input + i]; output->back_propagation[id] = resultat; } __global__ void kernelPerceptronLayer_learning(buffer * input, buffer * output, PCPTR * pcptr, DTYPE alpha) { int id = threadIdx.x; pcptr->bias[id] -= alpha * input->back_propagation[id]; } __global__ void kernelFullConnection_learning(buffer * input, buffer * output, FC * fc, DTYPE alpha) { int input_size = input->size; int output_size = output->size; int id = threadIdx.x; /* __shared__ DTYPE input_prop[SHAREDSIZE / 2]; __shared__ DTYPE output_backprop[SHAREDSIZE / 2]; if (id < output_size) output_backprop[id] = output->back_propagation[id]; if (id < input_size) input_prop[id % input_size] = input->propagation_valid[id % input_size]; __syncthreads(); if (id < output_size)*/ for (int i = 0; i < input_size; i++) fc->matrix[id + output_size * i] -= alpha * input->propagation_valid[i] * output->back_propagation[id]; } void propagationNoPipeline_kernelcall(int node_number, int maxdim, buffer * GPUbuffer_tab, node *GPUnode_tab, FC *GPUFC_tab, PCPTR *GPUPCPTR_tab) { kernelNetwork_propagationNoPipeline << <1, maxdim >> >(GPUbuffer_tab, GPUnode_tab, GPUFC_tab, GPUPCPTR_tab, node_number); } void learningNoPipeline_kernelcall(buffer * buffers, node * nodes, FC * fc, PCPTR * pcptr, int node_size, float * expected, DTYPE alpha, int maxdim) { kernelNetwork_learningNoPipeline <<<1, maxdim>>>(buffers, nodes, fc, pcptr, node_size, expected, alpha); } /* il faut que buffer soit decroissant */ void propagationNoPipeline_AtomicKernelcall( buffer * buffers, FC * fc, PCPTR * pcptr, 
std::vector<type_node> &node_type, std::vector<uint32_t> &index, std::vector<uint32_t> &buffer_size, int node_number) { for (int i = 0; i < node_number; i++) { switch (node_type[i]) { case FCONNECTION: //printf("kernel call : FC\n"); kernelFullConnection_propagation <<<1, buffer_size[i + 1] >> >(buffers + i, buffers + i + 1, fc + index[i]); break; case PERCEPTRON: //printf("kernel call : PERCEP\n"); kernelPerceptronLayer_propagation <<<1, buffer_size[i + 1] >> >(buffers + i, buffers + i + 1, pcptr + index[i]); break; } } } void learningNoPipeline_AtomicKernelcall( buffer * buffers, FC * fc, PCPTR * pcptr, std::vector<type_node> &node_type, std::vector<uint32_t> &index, std::vector<uint32_t> &buffer_size, int node_number, DTYPE * expected, DTYPE alpha) { propagationNoPipeline_AtomicKernelcall(buffers, fc, pcptr, node_type, index, buffer_size, node_number); gradient_kernel << <1, buffer_size[node_number] >> >(buffers + node_number, expected); for (int i = node_number - 1; i >= 0; i--) { switch (node_type[i]) { case FCONNECTION: //printf("kernel call : FC\n"); kernelFullConnection_backpropagation << <1, buffer_size[i] >> >(buffers + i + 1, buffers + i, fc + index[i]); break; case PERCEPTRON: //printf("kernel call : PERCEP\n"); kernelPerceptronLayer_backpropagation << <1, buffer_size[i] >> >(buffers + i + 1, buffers + i, pcptr + index[i]); break; } } for (int i = 0; i < node_number; i++) { switch (node_type[i]) { case FCONNECTION: //printf("kernel call : FC\n"); kernelFullConnection_learning <<<1, buffer_size[i + 1] >> >(buffers + i + 1, buffers + i, fc + index[i], alpha); break; case PERCEPTRON: //printf("kernel call : PERCEP\n"); kernelPerceptronLayer_learning <<<1, buffer_size[i + 1] >> >(buffers + i + 1, buffers + i, pcptr + index[i], alpha); break; } } }
04234c2ef50134f152c6a94da29719589b70f6c6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <memory> #include <chrono> #include <cstdlib> #include <vector> #include <string> #include "sparseMatrix.h" #include <hip/hip_runtime_api.h> #include <rocblas.h> #include <cusparse_v2.h> #include <thrust/device_vector.h> #include <iomanip> template<typename T> __device__ T warp_reduction(T val) { #define warpSize 32 for (auto offset = warpSize / 2; offset > 0; offset /= 2) { val += __shfl_down_sync(0, val, offset, warpSize); } return val; } template<typename T> __global__ void spMulAdd_scalar(const int * __restrict__ row, const int * __restrict__ col, const T * __restrict__ val, const T * __restrict__ dx, T * __restrict__ dy, int n, int nnz) { auto tid = threadIdx.x + blockIdx.x * blockDim.x; T y_val = 0.0; if (tid < n) { #pragma unroll for (auto j = row[tid]; j < row[tid + 1]; ++j) { y_val += val[j] * dx[col[j]]; } dy[tid] = y_val; } } template<typename T> __global__ void spMulAdd_vector(const int * __restrict__ row, const int * __restrict__ col, const T * __restrict__ val, const T * __restrict__ dx, T * __restrict__ dy, int n, int nnz) { auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto rowid = tid / warpSize; auto lane = tid % warpSize; T y_val = 0; if (rowid < n) { #pragma unroll for (auto i = row[rowid] + lane; i < row[rowid + 1]; i += warpSize) { y_val += val[i] * dx[col[i]]; } y_val = warp_reduction<T>(y_val); } if (lane == 0 && rowid < n) { dy[rowid] = y_val; } } int main(int args, char *argv[]) { // std::string fname; fname = argv[1]; sparseMatrix sp(fname); const auto n = sp.n; const auto nnz = sp.nnz; // xy std::unique_ptr<float[]> host_x(new float[n]); std::unique_ptr<float[]> host_y(new float[n]); for (auto i = 0; i < n; i++) { //host_x[i] = static_cast<float>(rand()) / RAND_MAX; host_x[i] = 1; host_y[i] = 0; } // gpu thrust::device_vector<int> row(n + 1); thrust::device_vector<int> col(nnz); thrust::device_vector<float> val(nnz); thrust::device_vector<float> 
vec_x(n); thrust::device_vector<float> vec_y(n); thrust::copy_n(sp.row.begin(), n + 1, row.begin()); thrust::copy_n(sp.col.begin(), nnz, col.begin()); thrust::copy_n(sp.val.begin(), nnz, val.begin()); thrust::copy_n(host_x.get(), n, vec_x.begin()); thrust::copy_n(host_y.get(), n, vec_y.begin()); int* rowPtr = thrust::raw_pointer_cast(&(row[0])); int* colPtr = thrust::raw_pointer_cast(&(col[0])); float* valPtr = thrust::raw_pointer_cast(&(val[0])); float* vec_xPtr = thrust::raw_pointer_cast(&(vec_x[0])); float* vec_yPtr = thrust::raw_pointer_cast(&(vec_y[0])); // ? const auto blocksize = 64; const dim3 block(blocksize, 1, 1); const dim3 grid(warpSize * ::ceil(n / static_cast<float>(block.x)), 1, 1); // const auto num_iter = 10; std::vector<double> time_stamp; for (auto i = 0; i < num_iter; i++) { std::chrono::system_clock::time_point start, end; start = std::chrono::system_clock::now(); // hipLaunchKernelGGL(( spMulAdd_vector<float>) , dim3(grid), dim3(block), 0, 0, rowPtr, colPtr, valPtr, vec_xPtr, vec_yPtr, n, nnz); end = std::chrono::system_clock::now(); time_stamp.push_back(static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(end - start).count())); } // cpu std::unique_ptr<float[]> result(new float[n]); thrust::copy_n(vec_y.begin(), n, result.get()); std::unique_ptr<float[]> host_result(new float[n]); for (auto i = 0; i < n; i++) { host_result[i] = 0; for (auto j = sp.row[i]; j < sp.row[i + 1]; j++) { host_result[i] += sp.val[j] * host_x[sp.col[j]]; } } auto residual = 0; auto y_norm = 0; for (auto i = 0; i < n; i++) { residual += ::pow(host_result[i] - result[i], 2); y_norm += ::pow(result[i], 2); } residual = std::sqrt(residual); y_norm = std::sqrt(y_norm); // float /* const auto m = 7 - std::log10(n); if (residual / y_norm < m) { std::cout << "ok" << std::endl; } else { std::cout << "ng" << std::endl; } // cuSPARSE ::hipsparseHandle_t cusparse; ::hipsparseCreate(&cusparse); ::hipsparseMatDescr_t matDescr; 
::hipsparseCreateMatDescr(&matDescr); ::hipsparseSetMatType(matDescr, HIPSPARSE_MATRIX_TYPE_GENERAL); ::hipsparseSetMatIndexBase(matDescr, HIPSPARSE_INDEX_BASE_ZERO); thrust::device_vector<float> result_cu(n); thrust::copy_n(host_y.get(), n, result_cu.begin()); float* result_cuPtr = thrust::raw_pointer_cast(&(result_cu[0])); const float ALPHA = 1; const float BETA = 0; std::vector<double> time_stamp_cublas; for (auto i = 0; i < num_iter; i++) { std::chrono::system_clock::time_point start_cublas, end_cublas; start_cublas = std::chrono::system_clock::now(); ::hipsparseScsrmv(cusparse, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz, &ALPHA, matDescr, valPtr, rowPtr, colPtr, vec_xPtr, &BETA, result_cuPtr); end_cublas = std::chrono::system_clock::now(); time_stamp_cublas.push_back(static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(end_cublas - start_cublas).count())); } std::unique_ptr<float[]> result_cu_host(new float[n]); thrust::copy_n(result_cu.begin(), n, result_cu_host.get()); */ // () const auto median_it = time_stamp.begin() + time_stamp.size() / 2; std::nth_element(time_stamp.begin(), median_it, time_stamp.end()); const auto time = *median_it; const auto median_it_cublas = time_stamp_cublas.begin() + time_stamp_cublas.size() / 2; std::nth_element(time_stamp_cublas.begin(), median_it_cublas, time_stamp_cublas.end()); const auto time_cublas = *median_it_cublas; const auto flops = 2 * nnz; const auto bytes = (n + 1) * sizeof(int) + nnz * sizeof(float) + nnz * sizeof(int) + 3 * n * sizeof(float); std::cout << "matrix: " << fname << std::endl; std::cout << "n: " << n << ", nnz: " << nnz << ", threads: " << blocksize << std::endl; std::cout << "time: " << time << " [msec]" << std::endl; //std::cout << "time(cublas): " << time_cublas << " [msec]" << std::endl; std::cout << "perf: " << flops / time / 1e6 << " [Gflops/sec]" << std::endl; //std::cout << "perf(cublas): " << flops / time_cublas / 1e6 << " [Gflops/sec]" << std::endl; std::cout << "perf: 
" << bytes / time / 1e6 << " [Gbytes/sec]" << std::endl; //std::cout << "perf(cublas): " << bytes / time_cublas / 1e6 << " [Gbytes/sec]" << std::endl; std::cout << "residual norm 2: " << residual / y_norm << std::endl; // std::cout << fname << "," << std::fixed << std::setprecision(15) << time << "," << time_cublas << "," << flops / time / 1e6 << "," << flops / time_cublas / 1e6 << "," << bytes / time / 1e6 << "," << bytes / time_cublas / 1e6 << std::endl; return 0; }
04234c2ef50134f152c6a94da29719589b70f6c6.cu
#include <iostream>
#include <memory>
#include <chrono>
#include <cstdlib>
#include <cmath>
#include <algorithm>
#include <vector>
#include <string>
#include "sparseMatrix.h"
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
#include <cusparse_v2.h>
#include <thrust/device_vector.h>
#include <iomanip>

// Warp-wide sum reduction via shuffle-down.
// BUG FIX: the participation mask passed to __shfl_down_sync was 0, which
// means "no lanes participate" and leaves the reduction result undefined.
// Every lane of a warp calls this together (rowid = tid / warpSize is
// uniform within a warp in spMulAdd_vector), so the full mask is correct.
template<typename T>
__device__ T warp_reduction(T val) {
#define warpSize 32
    for (auto offset = warpSize / 2; offset > 0; offset /= 2) {
        val += __shfl_down_sync(0xffffffff, val, offset, warpSize);
    }
    return val;
}

// CSR SpMV, scalar kernel: one thread per row.
// row/col/val: CSR arrays; dx: input vector; dy: output vector (dy = A*dx).
template<typename T>
__global__ void spMulAdd_scalar(const int * __restrict__ row,
                                const int * __restrict__ col,
                                const T * __restrict__ val,
                                const T * __restrict__ dx,
                                T * __restrict__ dy,
                                int n, int nnz) {
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    T y_val = 0.0;
    if (tid < n) {
        #pragma unroll
        for (auto j = row[tid]; j < row[tid + 1]; ++j) {
            y_val += val[j] * dx[col[j]];
        }
        dy[tid] = y_val;
    }
}

// CSR SpMV, vector kernel: one warp per row; the 32 lanes stride over the
// row's nonzeros, then a warp reduction folds the partial sums and lane 0
// writes the result.
template<typename T>
__global__ void spMulAdd_vector(const int * __restrict__ row,
                                const int * __restrict__ col,
                                const T * __restrict__ val,
                                const T * __restrict__ dx,
                                T * __restrict__ dy,
                                int n, int nnz) {
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    auto rowid = tid / warpSize;   // uniform within a warp
    auto lane = tid % warpSize;
    T y_val = 0;
    if (rowid < n) {
        #pragma unroll
        for (auto i = row[rowid] + lane; i < row[rowid + 1]; i += warpSize) {
            y_val += val[i] * dx[col[i]];
        }
        y_val = warp_reduction<T>(y_val);
    }
    if (lane == 0 && rowid < n) {
        dy[rowid] = y_val;
    }
}

int main(int args, char *argv[]) {
    // The matrix to load is given as a runtime argument.
    std::string fname;
    fname = argv[1];

    sparseMatrix sp(fname);
    const auto n = sp.n;
    const auto nnz = sp.nnz;

    // Build host-side vectors x and y.
    std::unique_ptr<float[]> host_x(new float[n]);
    std::unique_ptr<float[]> host_y(new float[n]);
    for (auto i = 0; i < n; i++) {
        //host_x[i] = static_cast<float>(rand()) / RAND_MAX;
        host_x[i] = 1;
        host_y[i] = 0;
    }

    // Device-side CSR arrays and vectors.
    thrust::device_vector<int> row(n + 1);
    thrust::device_vector<int> col(nnz);
    thrust::device_vector<float> val(nnz);
    thrust::device_vector<float> vec_x(n);
    thrust::device_vector<float> vec_y(n);

    thrust::copy_n(sp.row.begin(), n + 1, row.begin());
    thrust::copy_n(sp.col.begin(), nnz, col.begin());
    thrust::copy_n(sp.val.begin(), nnz, val.begin());
    thrust::copy_n(host_x.get(), n, vec_x.begin());
    thrust::copy_n(host_y.get(), n, vec_y.begin());

    int* rowPtr = thrust::raw_pointer_cast(&(row[0]));
    int* colPtr = thrust::raw_pointer_cast(&(col[0]));
    float* valPtr = thrust::raw_pointer_cast(&(val[0]));
    float* vec_xPtr = thrust::raw_pointer_cast(&(vec_x[0]));
    float* vec_yPtr = thrust::raw_pointer_cast(&(vec_y[0]));

    // Launch configuration: the vector kernel needs one warp (warpSize
    // threads) per row, hence warpSize * ceil(n / blocksize) blocks.
    const auto blocksize = 64;
    const dim3 block(blocksize, 1, 1);
    const dim3 grid(warpSize * std::ceil(n / static_cast<float>(block.x)), 1, 1);

    // Timing loop; the median of num_iter runs is reported.
    const auto num_iter = 10;
    std::vector<double> time_stamp;   // per-iteration kernel time [msec]
    for (auto i = 0; i < num_iter; i++) {
        cudaDeviceSynchronize();   // exclude previously queued async work
        const auto start = std::chrono::system_clock::now();

        spMulAdd_vector<float> <<<grid, block>>>(rowPtr, colPtr, valPtr, vec_xPtr, vec_yPtr, n, nnz);

        // BUG FIX: kernel launches are asynchronous; without this sync the
        // loop was timing only the launch overhead, not the SpMV itself.
        cudaDeviceSynchronize();
        const auto end = std::chrono::system_clock::now();
        // BUG FIX: measured in microseconds but all labels/formulas below
        // assume milliseconds; convert here (keeps sub-msec resolution).
        time_stamp.push_back(
            std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0);
    }

    // Verify against a CPU reference computation.
    std::unique_ptr<float[]> result(new float[n]);
    thrust::copy_n(vec_y.begin(), n, result.get());

    std::unique_ptr<float[]> host_result(new float[n]);
    for (auto i = 0; i < n; i++) {
        host_result[i] = 0;
        for (auto j = sp.row[i]; j < sp.row[i + 1]; j++) {
            host_result[i] += sp.val[j] * host_x[sp.col[j]];
        }
    }

    // BUG FIX: these were declared `auto ... = 0` (int), so every
    // fractional std::pow contribution was truncated to an integer.
    double residual = 0;
    double y_norm = 0;
    for (auto i = 0; i < n; i++) {
        residual += std::pow(host_result[i] - result[i], 2);
        y_norm += std::pow(result[i], 2);
    }
    residual = std::sqrt(residual);
    y_norm = std::sqrt(y_norm);

    /* Disabled: rough tolerance check and cuSPARSE comparison run.
    const auto m = 7 - std::log10(n);
    if (residual / y_norm < m) {
        std::cout << "ok" << std::endl;
    } else {
        std::cout << "ng" << std::endl;
    }

    // cuSPARSE
    ::cusparseHandle_t cusparse;
    ::cusparseCreate(&cusparse);
    ::cusparseMatDescr_t matDescr;
    ::cusparseCreateMatDescr(&matDescr);
    ::cusparseSetMatType(matDescr, CUSPARSE_MATRIX_TYPE_GENERAL);
    ::cusparseSetMatIndexBase(matDescr, CUSPARSE_INDEX_BASE_ZERO);

    thrust::device_vector<float> result_cu(n);
    thrust::copy_n(host_y.get(), n, result_cu.begin());
    float* result_cuPtr = thrust::raw_pointer_cast(&(result_cu[0]));

    const float ALPHA = 1;
    const float BETA = 0;

    std::vector<double> time_stamp_cublas;
    for (auto i = 0; i < num_iter; i++) {
        std::chrono::system_clock::time_point start_cublas, end_cublas;
        start_cublas = std::chrono::system_clock::now();
        ::cusparseScsrmv(cusparse, CUSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz,
                         &ALPHA, matDescr, valPtr, rowPtr, colPtr, vec_xPtr,
                         &BETA, result_cuPtr);
        end_cublas = std::chrono::system_clock::now();
        time_stamp_cublas.push_back(static_cast<double>(
            std::chrono::duration_cast<std::chrono::microseconds>(end_cublas - start_cublas).count()));
    }

    std::unique_ptr<float[]> result_cu_host(new float[n]);
    thrust::copy_n(result_cu.begin(), n, result_cu_host.get());
    */

    // Report the median time and derived performance figures.
    // BUG FIX: the original computed a median of time_stamp_cublas here even
    // though that vector is declared inside the comment block above — the
    // file did not compile. The cuSPARSE figures stay disabled with the rest.
    const auto median_it = time_stamp.begin() + time_stamp.size() / 2;
    std::nth_element(time_stamp.begin(), median_it, time_stamp.end());
    const auto time = *median_it;

    const auto flops = 2 * nnz;
    const auto bytes = (n + 1) * sizeof(int) + nnz * sizeof(float)
                     + nnz * sizeof(int) + 3 * n * sizeof(float);

    std::cout << "matrix: " << fname << std::endl;
    std::cout << "n: " << n << ", nnz: " << nnz << ", threads: " << blocksize << std::endl;
    std::cout << "time: " << time << " [msec]" << std::endl;
    //std::cout << "time(cublas): " << time_cublas << " [msec]" << std::endl;
    std::cout << "perf: " << flops / time / 1e6 << " [Gflops/sec]" << std::endl;
    //std::cout << "perf(cublas): " << flops / time_cublas / 1e6 << " [Gflops/sec]" << std::endl;
    std::cout << "perf: " << bytes / time / 1e6 << " [Gbytes/sec]" << std::endl;
    //std::cout << "perf(cublas): " << bytes / time_cublas / 1e6 << " [Gbytes/sec]" << std::endl;
    std::cout << "residual norm 2: " << residual / y_norm << std::endl;

    // std::cout << fname << "," << std::fixed << std::setprecision(15) << time << "," << time_cublas << "," << flops / time / 1e6 << "," << flops / time_cublas / 1e6 << "," << bytes / time / 1e6 << "," << bytes / time_cublas / 1e6 << std::endl;

    return 0;
}
0b3a94bc0ebc0ec6670a97b3af08e7181420ed1a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"

#define CUDA_MAX_THREADS 1024   // this is safe, in reality 256 is our limit

// Adaptive-pooling window bounds: output index a (of b outputs) covers the
// half-open input index range [START_IND, END_IND) out of c inputs.
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
// 5d tensor B x D x T x H x W

/*
 * Description:
 *    this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
 *    4D input, 4D output, 4D argmax x and y
 *
 * Launch layout: blockIdx.x (+ offsetZ) selects one (slice, output-frame)
 * plane; the y grid/block dims and x block dim tile the H x W output plane.
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateOutput_kernel(
                        T *input, T *output, THCIndex_t *indices,
                        int isizeT, int isizeH, int isizeW,
                        int osizeT, int osizeH, int osizeW,
                        int64_t istrideD,
                        int64_t istrideT, int64_t istrideH, int64_t istrideW,
                        int64_t offsetZ)
{
  // iterators on output pixels
  int ot, oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  ot = o_plane % osizeT;     // output frame/time
  int d = o_plane / osizeT;  // slice/feature

  // input frame/time ramge is fixed.
  int istartT = START_IND(ot, osizeT, isizeT);
  int iendT = END_IND(ot, osizeT, isizeT);
  int kT = iendT - istartT;

  // input offset by slice/feature and earliest relevant frame/time
  T *input_dt = input + d*istrideD + istartT*istrideT;
  // output offset by slice/feature and frame/time
  T *output_dt = output + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/time
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {

    int istartH = START_IND(oh, osizeH, isizeH);
    int iendH = END_IND(oh, osizeH, isizeH);
    int kH = iendH - istartH;

    for(ow = ostartW; ow < oendW; ow += ostepW) {

      int istartW = START_IND(ow, osizeW, isizeW);
      int iendW = END_IND(ow, osizeW, isizeW);
      int kW = iendW - istartW;

      // Scan the kT x kH x kW input window for its maximum and its argmax.
      T *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
      T *ptr_output = output_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      int64_t argmax = -1;
      T max = THCNumerics<T>::min();

      int it, ih, iw;
      for(it = 0; it < kT; ++it) {
        for(ih = 0; ih < kH; ++ih) {
          for(iw = 0; iw < kW; ++iw) {
            T val = ptr_input[ih*istrideH + iw*istrideW];
            if (val > max) {
              max = val;
              // flat index into the (contiguous) T x H x W slice
              argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW;
            }
          }
        }
        ptr_input += istrideT;   // next input frame
      }

      // Update output and argmax
      *ptr_output = max;
      *ptr_ind = argmax + TH_INDEX_BASE;
    }
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Assumes that input size can be perfectly divided by output size, i.e.
 *    each input pixel can only be argmax of one output pixel.
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      // BUG FIX: argmax was `int` here while the atomic variant below uses
      // int64_t; a slice larger than 2^31 elements would overflow the index.
      int64_t argmax = (*ptr_ind) - TH_INDEX_BASE;
      gradInput_d[argmax] += grad_delta;
    }
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Uses atomic add (needed when one input pixel may be the argmax of
 *    several output pixels).
 */
template <typename T>
__global__ void cunn_atomic_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      int64_t argmax = (*ptr_ind) - TH_INDEX_BASE;
      atomicAdd(&(gradInput_d[argmax]), grad_delta);
    }
  }
}

#include "generic/VolumetricAdaptiveMaxPooling.cu"
#include "THHGenerateFloatTypes.h"

#undef CUDA_MAX_THREADS
0b3a94bc0ebc0ec6670a97b3af08e7181420ed1a.cu
#include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"

#define CUDA_MAX_THREADS 1024   // this is safe, in reality 256 is our limit

// Adaptive-pooling window bounds: output index a (of b outputs) covers the
// half-open input index range [START_IND, END_IND) out of c inputs.
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
// 5d tensor B x D x T x H x W

/*
 * Description:
 *    this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
 *    4D input, 4D output, 4D argmax x and y
 *
 * Launch layout: blockIdx.x (+ offsetZ) selects one (slice, output-frame)
 * plane; the y grid/block dims and x block dim tile the H x W output plane.
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateOutput_kernel(
                        T *input, T *output, THCIndex_t *indices,
                        int isizeT, int isizeH, int isizeW,
                        int osizeT, int osizeH, int osizeW,
                        int64_t istrideD,
                        int64_t istrideT, int64_t istrideH, int64_t istrideW,
                        int64_t offsetZ)
{
  // iterators on output pixels
  int ot, oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  ot = o_plane % osizeT;     // output frame/time
  int d = o_plane / osizeT;  // slice/feature

  // input frame/time ramge is fixed.
  int istartT = START_IND(ot, osizeT, isizeT);
  int iendT = END_IND(ot, osizeT, isizeT);
  int kT = iendT - istartT;

  // input offset by slice/feature and earliest relevant frame/time
  T *input_dt = input + d*istrideD + istartT*istrideT;
  // output offset by slice/feature and frame/time
  T *output_dt = output + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/time
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {

    int istartH = START_IND(oh, osizeH, isizeH);
    int iendH = END_IND(oh, osizeH, isizeH);
    int kH = iendH - istartH;

    for(ow = ostartW; ow < oendW; ow += ostepW) {

      int istartW = START_IND(ow, osizeW, isizeW);
      int iendW = END_IND(ow, osizeW, isizeW);
      int kW = iendW - istartW;

      // Scan the kT x kH x kW input window for its maximum and its argmax.
      T *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
      T *ptr_output = output_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      int64_t argmax = -1;
      T max = THCNumerics<T>::min();

      int it, ih, iw;
      for(it = 0; it < kT; ++it) {
        for(ih = 0; ih < kH; ++ih) {
          for(iw = 0; iw < kW; ++iw) {
            T val = ptr_input[ih*istrideH + iw*istrideW];
            if (val > max) {
              max = val;
              // flat index into the (contiguous) T x H x W slice
              argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW;
            }
          }
        }
        ptr_input += istrideT;   // next input frame
      }

      // Update output and argmax
      *ptr_output = max;
      *ptr_ind = argmax + TH_INDEX_BASE;
    }
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Assumes that input size can be perfectly divided by output size, i.e.
 *    each input pixel can only be argmax of one output pixel.
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      // BUG FIX: argmax was `int` here while the atomic variant below uses
      // int64_t; a slice larger than 2^31 elements would overflow the index.
      int64_t argmax = (*ptr_ind) - TH_INDEX_BASE;
      gradInput_d[argmax] += grad_delta;
    }
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Uses atomic add (needed when one input pixel may be the argmax of
 *    several output pixels).
 */
template <typename T>
__global__ void cunn_atomic_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      int64_t argmax = (*ptr_ind) - TH_INDEX_BASE;
      atomicAdd(&(gradInput_d[argmax]), grad_delta);
    }
  }
}

#include "generic/VolumetricAdaptiveMaxPooling.cu"
#include "THCGenerateFloatTypes.h"

#undef CUDA_MAX_THREADS
43d64019d7b5832cb57bfe7779a1942b984d4258.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"

#include "ten_tusscher_2004_epi_S2_15.h"

// Allocate the pitched state-variable matrix (NEQ rows x num_volumes columns)
// on the GPU and launch the kernel that fills it with the model's initial
// conditions. Returns the row pitch in bytes so callers can index the matrix.
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {

    print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");

    // execution configuration
    const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;

    size_t size = num_volumes*sizeof(real);

    check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
    // publish the pitch to device code via the `pitch` __constant__ symbol
    check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));

    hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0,  *sv, num_volumes);

    check_cuda_error( hipPeekAtLastError() );
    // BUG FIX: the synchronize result was previously discarded; asynchronous
    // kernel faults surface here, so check it like every other HIP call.
    check_cuda_error( hipDeviceSynchronize() );
    return pitch_h;
}

// Advance every cell's ODE system by num_steps explicit steps of size dt.
// stim_currents holds one stimulus value per cell; cells_to_solve (optional,
// used with adaptive meshes) maps thread index -> state-variable column.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {

    // execution configuration
    const int GRID  = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;

    size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
    size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;

    real *stims_currents_device;
    check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
    check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));

    //the array cells to solve is passed when we are using and adapative mesh
    uint32_t *cells_to_solve_device = NULL;
    if(cells_to_solve != NULL) {
        check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
        check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
    }

    hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0,  dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);

    check_cuda_error( hipPeekAtLastError() );

    check_cuda_error(hipFree(stims_currents_device));
    if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.4855897827032,0.00131304048220611,0.777673256476332,0.777530419758781,0.000176915007944578,0.484229873407731,0.00295766051225702,0.999998320538839,1.96031195718503e-08,1.91202485653593e-05,0.999769095072611,1.00710495848039,0.999995509954569,4.49502542744173e-05,0.671374359732192,10.7525810292738,138.733913720923}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) 
{ if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real 
pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.7328911543692,0.000268220573270157,0.000159694102989303,4.95038560493972e-05,0.281222894359262,0.155491530964224,0.222154844407151,4.08947089393252,0.0209965622527636,1.02972284723443,1096.92640050885,0.000622419783707689,0.0929425682382634,0.0199277276192553,0.00362501998690467,4.31336229850729e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real 
AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; 
//update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = 
Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
43d64019d7b5832cb57bfe7779a1942b984d4258.cu
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_15.h"

// Allocates the pitched state-variable matrix on the GPU (NEQ rows, one column
// per tissue volume) and launches the kernel that writes the initial conditions.
// Returns the row pitch (in bytes) so the host can index the matrix later.
// NOTE(review): `pitch`/`pitch_h`, `real`, NEQ, BLOCK_SIZE and the macro
// SET_ODE_INITIAL_CONDITIONS_GPU come from the included headers — confirm there.
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {

    print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");

    // execution configuration
    const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;

    size_t size = num_volumes*sizeof(real);

    // One row per state variable; cudaMallocPitch aligns each row of the matrix.
    check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
    // Publish the pitch to the device symbol `pitch` read by the kernels below.
    check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));

    kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);

    check_cuda_error( cudaPeekAtLastError() );
    cudaDeviceSynchronize();
    return pitch_h;
}

// Host wrapper: copies the per-cell stimulus currents (and, for adaptive meshes,
// the list of cell indices to solve) to the device, launches `solve_gpu` to
// advance every requested cell by `num_steps` steps of size `dt`, then frees
// the device temporaries.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {

    // execution configuration
    const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;

    size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
    size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;

    real *stims_currents_device;
    check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
    check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));

    // the array cells_to_solve is passed when we are using an adaptive mesh
    uint32_t *cells_to_solve_device = NULL;
    if(cells_to_solve != NULL) {
        check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
        check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
    }

    solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);

    check_cuda_error( cudaPeekAtLastError() );

    check_cuda_error(cudaFree(stims_currents_device));
    if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}

// Device kernel: one thread per volume; writes the steady-state initial value
// of each of the NEQ state variables into the pitched `sv` matrix (row = state
// variable index, column = volume).
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) {
    // Thread ID
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;

    if(threadID < num_volumes) {

        /* Original (model default) initial conditions, kept for reference:
        *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V;   // V; millivolt
        *((real*)((char*)sv + pitch * 1) + threadID) = 0.f;         // M
        *((real*)((char*)sv + pitch * 2) + threadID) = 0.75;        // H
        *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f;       // J
        *((real*)((char*)sv + pitch * 4) + threadID) = 0.f;         // Xr1
        *((real*)((char*)sv + pitch * 5) + threadID) = 1.f;         // Xr2
        *((real*)((char*)sv + pitch * 6) + threadID) = 0.f;         // Xs
        *((real*)((char*)sv + pitch * 7) + threadID) = 1.f;         // S
        *((real*)((char*)sv + pitch * 8) + threadID) = 0.f;         // R
        *((real*)((char*)sv + pitch * 9) + threadID) = 0.f;         // D
        *((real*)((char*)sv + pitch * 10) + threadID) = 1.f;        // F
        *((real*)((char*)sv + pitch * 11) + threadID) = 1.f;        // FCa
        *((real*)((char*)sv + pitch * 12) + threadID) = 1.f;        // G
        *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002;     // Cai
        *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f;       // CaSR
        *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f;      // Nai
        *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f;     // Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.4855897827032,0.00131304048220611,0.777673256476332,0.777530419758781,0.000176915007944578,0.484229873407731,0.00295766051225702,0.999998320538839,1.96031195718503e-08,1.91202485653593e-05,0.999769095072611,1.00710495848039,0.999995509954569,4.49502542744173e-05,0.671374359732192,10.7525810292738,138.733913720923};
        for (uint32_t i = 0; i < NEQ; i++)
            *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
    }
}

// Solving the model for each cell in the tissue matrix ni x nj
// One thread per cell: advances that cell's ODE system `num_steps` times.
// When `cells_to_solve` is non-NULL (adaptive mesh) it maps thread index to a
// state-variable column; otherwise the thread index is used directly.
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
                          uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
                          int num_steps)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    int sv_id;

    // Each thread solves one cell model
    if(threadID < num_cells_to_solve) {
        if(cells_to_solve)
            sv_id = cells_to_solve[threadID];
        else
            sv_id = threadID;

        real rDY[NEQ];

        for (int n = 0; n < num_steps; ++n) {

            RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);

            // NOTE(review): rDY[0] already holds the Euler-updated voltage
            // (RHS_gpu sets rDY_[0] = svolt + dt*(-sItot)), so this first
            // write is immediately overwritten by the i == 0 iteration of the
            // copy loop below; it appears to be dead code kept for symmetry
            // with other generated model files.
            *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);

            // rDY holds the full updated state, not derivatives: copy it back.
            for(int i = 0; i < NEQ; i++) {
                *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
            }
        }
    }
}

// Computes one explicit time step of the ten Tusscher 2004 (epicardial) human
// ventricular cell model for the cell stored in column `threadID_` of the
// pitched state matrix `sv`, writing the UPDATED state (not derivatives) into
// rDY_[0..16]. Gating variables use exponential (Rush-Larsen-style) updates;
// concentrations use forward Euler; the membrane voltage update is explicit
// Euler on the total ionic current plus `stim_current`.
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {

    // State variables (indices match the initial-conditions table above)
    real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);   // V
    real sm = *((real*)((char*)sv + pitch * 1) + threadID_);      // m
    real sh = *((real*)((char*)sv + pitch * 2) + threadID_);      // h
    real sj = *((real*)((char*)sv + pitch * 3) + threadID_);      // j
    real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);    // Xr1
    real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);    // Xr2
    real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);     // Xs
    real ss = *((real*)((char*)sv + pitch * 7) + threadID_);      // s
    real sr = *((real*)((char*)sv + pitch * 8) + threadID_);      // r
    real sd = *((real*)((char*)sv + pitch * 9) + threadID_);      // d
    real sf = *((real*)((char*)sv + pitch * 10) + threadID_);     // f
    real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);   // fCa
    real sg = *((real*)((char*)sv + pitch * 12) + threadID_);     // g
    real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);    // Cai
    real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);   // CaSR
    real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);    // Nai
    real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);     // Ki

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
///#ifdef EPI
    real Gks=0.245;
///#endif
///#ifdef ENDO
///    real Gks=0.245;
///#endif
///#ifdef MCELL
    //real Gks=0.062;
///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
///#ifdef EPI
    real Gto=0.294;
///#endif
///#ifdef ENDO
///    real Gto=0.073;
///#endif
///#ifdef MCELL
///    real Gto=0.294;
///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Setting Elnaz's parameters: overrides the default conductances above
    // with a fitted parameter set specific to this model variant (S2_15).
    real parameters []={14.7328911543692,0.000268220573270157,0.000159694102989303,4.95038560493972e-05,0.281222894359262,0.155491530964224,0.222154844407151,4.08947089393252,0.0209965622527636,1.02972284723443,1096.92640050885,0.000622419783707689,0.0929425682382634,0.0199277276192553,0.00362501998690467,4.31336229850729e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Per-current and per-rate scratch variables.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents: Nernst/reversal potentials and rectifier factors
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    // SR release uses the fitted arel/crel instead of the model defaults above.
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with buffering; quadratic-formula solve for the free CaSR.
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium with buffering; same quadratic-formula pattern.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    // Sodium and potassium: forward Euler on the summed membrane fluxes.
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Transient-outward gate kinetics differ by cell type (compile-time choice).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential / Rush-Larsen-style integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g may not increase while the cell is depolarized (V > -37 mV):
    // keep the old value if the exponential update would raise it.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37)
        sg=gold;

    //update voltage (explicit Euler; rDY_[0] is the NEW voltage, see solve_gpu)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
dbbb2ed4cae6c4117002bb6ce4a143695651d956.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <dacrt/dacrt.h> #include <util/cutimer.h> #include <util/util.h> extern "C" void dacrtCompleteRender(ParallelPack& pack, TriangleArray& dev_triangles, RayArray& dev_rays, DacrtRunTimeParameters& rtparams, Counters& ctr); extern "C" __global__ void updateMinKernel(int* ray_id, float* min_hits, int* minhit_ids, float* global_min, int* global_hits, int num_rays); extern "C" __global__ void segmentedBruteForce(RayArray rays, TriangleArray triangles, int* buffered_ray_ids, int ray_buffer_occupied, int* buffered_tri_ids, int tri_buffer_occupied, int* ray_segment_sizes, int* tri_segment_sizes, int* ray_segment_start, int* tri_segment_start, int num_segments, float* maxts, int* hitids, int num_threads_launched, int num_blocks_launched); /** CELL BASED VERSION ****************** Idea: If I can somehow combine the two sorting operations on both the left and right child of a single node, then I will be saving twice the savings in sort operations Expected Difficulties: How are we going to maintain state info across the nodes now? 
We need detailed plans TODO: Add detailed comments below before doing any operation */ struct Cell { AABB parent, left, right; int ptpivot, prpivot, ltpivot, rtpivot, lrpivot, rrpivot; // p*pivot => parent pivot; l*pivot=>left child ; r*pivot => right child thrust::device_vector<int> ray_idx; thrust::device_vector<int> triangle_idx; //thrust::device_vector<int> ray_occupancy; //thrust::device_vector<int> tri_occupancy; Cell() { ptpivot = prpivot = ltpivot = rtpivot = lrpivot = rrpivot = 0; } }; struct BruteForceCell { int tricnt, raycnt; thrust::device_vector<int> ray_idx; thrust::device_vector<int> triangle_idx; }; // cell filter kernels /// NOTE: Key idea here is to assign values so that we can effectively split the left and right in one pass /// So, left = 1, right = 3 and both = 2 __global__ void triCellFilterKernel(AABB left, AABB right, float3* v0, float3* v1, float3* v2, int* tri_idx, int num_tris, int* occupancy) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < num_tris) { /// NOTE : float3 lcentroid = (left.bmin + left.bmax) * 0.5f; float3 rcentroid = (right.bmin + right.bmax) * 0.5f; float3 lextents = left.bmax - left.bmin; float3 rextents = right.bmax - right.bmin; int triangle_id = tri_idx[tid]; float triverts[3][3] = {{v0[triangle_id].x, v0[triangle_id].y, v0[triangle_id].z}, {v1[triangle_id].x, v1[triangle_id].y, v1[triangle_id].z}, {v2[triangle_id].x, v2[triangle_id].y, v2[triangle_id].z}}; float lboxhalf[3] = {lextents.x * 0.5f, lextents.y * 0.5f, lextents.z * 0.5f}; float rboxhalf[3] = {rextents.x * 0.5f, rextents.y * 0.5f, rextents.z * 0.5f}; float lboxcenter[3] = {lcentroid.x, lcentroid.y, lcentroid.z}; float rboxcenter[3] = {rcentroid.x, rcentroid.y, rcentroid.z}; /// TODO: Can we replace this costly test with the simpler test? Any jump in total performance and not only this small step. 
int lo = triBoxOverlap(lboxcenter, lboxhalf, triverts); int ro = triBoxOverlap(rboxcenter, rboxhalf, triverts); //if(lo == 1 && ro == 1) printf("Hey : threadIdx : %d\n", tid); int val = lo && ro ? 2 : (lo ? 1 : 3); occupancy[tid] = val; } }; __global__ void rayCellFilterKernel(AABB left, AABB right, float3* o, float3* dir, int* ray_ids, int num_rays, int* occupancy) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < num_rays) { int ray_id = ray_ids[tid]; Ray ray(o[ray_id], dir[ray_id]); float thit; bool lo = left.rayIntersect(ray, thit); bool ro = right.rayIntersect(ray, thit); // left = 1, right = 3, both = 2 and no_hit = 4 int val = lo && ro ? 2 : (lo ? 1 : (ro ? 3 : 4)); occupancy[tid] = val; } } // we call this function to complete the parallel brute force. We've written this as a function call so that we can use it elsewhere extern "C" void completeBruteForce(ParallelPack& pack, TriangleArray& d_triangles, RayArray& d_rays, DacrtRunTimeParameters& rtparams, Counters& ctr) { thrust::device_vector<int> ray_segment_start(pack.num_segments); thrust::device_vector<int> tri_segment_start(pack.num_segments); thrust::exclusive_scan(pack.tri_segment_sizes.begin(), pack.tri_segment_sizes.begin() + pack.num_segments, tri_segment_start.begin()); thrust::exclusive_scan(pack.ray_segment_sizes.begin(), pack.ray_segment_sizes.begin() + pack.num_segments, ray_segment_start.begin()); int num_blocks = pack.num_segments; int num_threads_per_block = rtparams.NUM_RAYS_PER_BLOCK; Timer segtimer("Segmented timer"); segtimer.start(); hipLaunchKernelGGL(( segmentedBruteForce), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_rays, d_triangles, thrust::raw_pointer_cast(&pack.buffered_ray_idx[0]), pack.ray_buffer_occupied, thrust::raw_pointer_cast(&pack.buffered_tri_idx[0]), pack.tri_buffer_occupied, thrust::raw_pointer_cast(&pack.ray_segment_sizes[0]), thrust::raw_pointer_cast(&pack.tri_segment_sizes[0]), thrust::raw_pointer_cast(&ray_segment_start[0]), 
thrust::raw_pointer_cast(&tri_segment_start[0]), pack.num_segments, thrust::raw_pointer_cast(&pack.buffered_ray_maxts[0]), thrust::raw_pointer_cast(&pack.buffered_ray_hitids[0]), num_threads_per_block * num_blocks, num_blocks); segtimer.stop(); ctr.brute_force_time += segtimer.get_ms(); Timer segsort("Segmented SOrt"); segsort.start(); thrust::sort_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied, thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin()))); segsort.stop(); ctr.seg_sort_time += segsort.get_ms(); static thrust::device_vector<int> ray_idx(rtparams.BUFFER_SIZE); static thrust::device_vector<float> ray_maxts(rtparams.BUFFER_SIZE); static thrust::device_vector<int> ray_hitids(rtparams.BUFFER_SIZE); static thrust::equal_to<int> pred; typedef thrust::device_vector<int>::iterator iter; typedef thrust::device_vector<float>::iterator fiter; typedef thrust::zip_iterator<thrust::tuple<fiter, iter> > zippy; thrust::pair<iter, zippy> minend; MinHitFunctor<thrust::tuple<float, int> > min_hit_functor; Timer segred("Segmented Reduce"); segred.start(); minend = thrust::reduce_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied, thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin())), ray_idx.begin(), thrust::make_zip_iterator(thrust::make_tuple(ray_maxts.begin(), ray_hitids.begin())), pred, min_hit_functor); segred.stop(); ctr.reduction_time += segred.get_ms(); int num_valid_keys = minend.first - ray_idx.begin(); num_threads_per_block = 512; num_blocks = num_valid_keys / num_threads_per_block + (num_valid_keys % num_threads_per_block != 0); Timer update("update"); update.start(); hipLaunchKernelGGL(( updateMinKernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, thrust::raw_pointer_cast(&ray_idx[0]), thrust::raw_pointer_cast(&ray_maxts[0]), 
thrust::raw_pointer_cast(&ray_hitids[0]), thrust::raw_pointer_cast(&pack.dev_ray_maxts[0]), thrust::raw_pointer_cast(&pack.dev_hitids[0]), num_valid_keys); update.stop(); ctr.update_min_time += update.get_ms(); pack.buffered_ray_idx.clear(); pack.buffered_tri_idx.clear(); pack.tri_segment_sizes.clear(); pack.ray_segment_sizes.clear(); pack.buffered_ray_maxts.clear(); pack.buffered_ray_hitids.clear(); pack.segment_ids.clear(); pack.ray_buffer_occupied = 0; pack.tri_buffer_occupied = 0; pack.num_segments = 0; } void gpuDacrtCell(AABB& root, TriangleArray& d_triangles, int* tri_idx_array, int tpivot, RayArray& rays, int* ray_idx_array, int rpivot, float* h_maxts, int* h_hitids, DacrtRunTimeParameters& rtparams, Counters& ctr, Logger& logger) { std::stack<Cell> recursion_stack; std::queue<BruteForceCell> bruteForceQueue; Cell rootcell; rootcell.parent = root; rootcell.prpivot = rpivot; rootcell.ptpivot = tpivot; rootcell.ray_idx.resize(rpivot); rootcell.triangle_idx.resize(tpivot); thrust::copy(thrust::device_ptr<int>(tri_idx_array), thrust::device_ptr<int>(tri_idx_array) + tpivot, rootcell.triangle_idx.begin()); thrust::copy(thrust::device_ptr<int>(ray_idx_array), thrust::device_ptr<int>(ray_idx_array) + rpivot, rootcell.ray_idx.begin()); recursion_stack.push(rootcell); // we will use this vector throughout the iterative procedure thrust::device_vector<int> ray_occupancy; thrust::device_vector<int> tri_occupancy; int tleft = 0, tright = 0, tboth = 0; int rleft = 0, rright = 0, rboth = 0, rnone = 0; int lefttricnt = 0, righttricnt = 0; int leftraycnt = 0, rightraycnt = 0; // initialize the parallel pack thrust::device_vector<int> buffered_ray_idx(rtparams.BUFFER_SIZE); thrust::device_vector<int> buffered_tri_idx(rtparams.BUFFER_SIZE); thrust::device_vector<int> segment_ids(rtparams.MAX_SEGMENTS); thrust::device_vector<int> ray_segment_sizes(rtparams.MAX_SEGMENTS); thrust::device_vector<int> tri_segment_sizes(rtparams.MAX_SEGMENTS); thrust::device_vector<float> 
buffered_ray_maxts(rtparams.BUFFER_SIZE, FLT_MAX); thrust::device_vector<int> buffered_ray_hitids(rtparams.BUFFER_SIZE, -1); thrust::device_vector<float> dev_ray_maxts(rpivot, FLT_MAX); thrust::device_vector<int> dev_hitids(rpivot, -1); int ray_buffer_occupied = 0; int tri_buffer_occupied = 0; int num_segments = 0; int debugctr = 0; ParallelPack pack(buffered_ray_idx, buffered_tri_idx, segment_ids, ray_segment_sizes, tri_segment_sizes, buffered_ray_maxts, buffered_ray_hitids, dev_ray_maxts, dev_hitids, ray_buffer_occupied, tri_buffer_occupied, num_segments); do { Cell cell = recursion_stack.top(); recursion_stack.pop(); debugctr++; int NUM_THREADS_PER_BLOCK = rtparams.NUM_RAYS_PER_BLOCK; int NUM_BLOCKS = (cell.ptpivot / NUM_THREADS_PER_BLOCK) + (cell.ptpivot % NUM_THREADS_PER_BLOCK != 0); float split_pos; splitSpatialMedian(cell.parent, cell.left, cell.right, split_pos); // reallocate memory for ray and tri occupancy; ray_occupancy.resize(cell.prpivot); tri_occupancy.resize(cell.ptpivot); Timer trifiltertimer("tri filter timer"); trifiltertimer.start(); hipLaunchKernelGGL(( triCellFilterKernel), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, cell.left, cell.right, d_triangles.v0, d_triangles.v1, d_triangles.v2, thrust::raw_pointer_cast(&cell.triangle_idx[0]), cell.ptpivot, thrust::raw_pointer_cast(&tri_occupancy[0])); trifiltertimer.stop(); ctr.tri_filter_time += trifiltertimer.get_ms(); Timer trisorttimer("tri sort timer"); trisorttimer.start(); thrust::sort_by_key(tri_occupancy.begin(), tri_occupancy.end(), cell.triangle_idx.begin()); trisorttimer.stop(); ctr.trisortbykey_time += trisorttimer.get_ms(); std::vector<int> temp_keys(3); std::vector<int> temp_values(3); thrust::device_vector<int> dtemp_keys(3); thrust::device_vector<int> dtemp_values(3); //std::vector<int> debug(cell.ptpivot); //thrust::copy(tri_occupancy.begin(), tri_occupancy.end(), debug.begin()); thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> end; 
Timer triredtimer("tri red timer"); triredtimer.start(); end = thrust::reduce_by_key(tri_occupancy.begin(), tri_occupancy.end(), tri_occupancy.begin(), dtemp_keys.begin(), dtemp_values.begin()); triredtimer.stop(); ctr.trireduction_time += triredtimer.get_ms(); thrust::copy(dtemp_keys.begin(),dtemp_keys.end(), temp_keys.begin()); thrust::copy(dtemp_values.begin(), dtemp_values.end(), temp_values.begin()); int num_types = end.first - dtemp_keys.begin(); int total_tris = 0; bool bleft = false, bright = false, bboth = false; if(num_types == 3) { bleft = bright = bboth = true; tleft = temp_values[0]; tboth = temp_values[1]/2; tright = temp_values[2]/3; } else if(num_types == 2) { // first determine the keys // only two possible combos.!! if(temp_keys[0] == 1) { bleft = true; if(temp_keys[1] == 2) bboth = true; else bright = true; } else if(temp_keys[0] == 2) { bleft = false; bboth = true; bright = true; } if(bleft) { tleft = temp_values[0]; if(bboth) { tboth = temp_values[1]/2; } else if(bright) { tright = temp_values[1]/3; } } else if(bboth) { tboth = temp_values[0]/2; if(bright) { tright = temp_values[1]/3; } } } else if(num_types == 1) { if(temp_keys[0] == 1) bleft = true; else if(temp_keys[0] == 2) bboth = true; else bright = true; if(bleft) { tleft = temp_values[0]; } else if(bboth) { tboth = temp_values[0]/2; } else if(bright) { tright = temp_values[0]/3; } } total_tris = tleft + tright + tboth; if(total_tris != cell.ptpivot) { std::cerr<<"Error in triangle count ; counter value : "<<debugctr<<"\n"; } lefttricnt = tleft + tboth; righttricnt = tright + tboth; if(bleft && bboth) { cell.ltpivot = tleft + tboth; cell.rtpivot = tleft + 1; // even if we dont have separate right values, both counts as right. So this is perfectly valid. } else if(!bleft && bboth) { cell.ltpivot = tboth; cell.rtpivot = 0; } else if(bleft && !bboth) { // we have got only left triangles and no common ones. we have to check if we have right triangles only.!! 
cell.ltpivot = tleft; if(bright) cell.rtpivot = tleft + 1; else cell.rtpivot = total_tris; // set to the end of the list.! } else if(!bleft && !bboth) { cell.ltpivot = 0; if(bright) cell.rtpivot = 0; // this is the only way we can do this. else cell.rtpivot = total_tris; } // ray stuff /** The arrangement of ray data LLLLLLLLLLLLLL - BBBBBBBB - RRRRRRRRRRR - NNNNNNNNNNNNNN ^ ^ ^ | | | right left right start end end */ // rest all the boolean values here bleft = bright = bboth = false; bool bnone = false; NUM_BLOCKS = (cell.prpivot / NUM_THREADS_PER_BLOCK) + (cell.prpivot % NUM_THREADS_PER_BLOCK != 0); Timer rayfiltertimer("ray filter timer"); rayfiltertimer.start(); hipLaunchKernelGGL(( rayCellFilterKernel), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, cell.left, cell.right, rays.o, rays.d, thrust::raw_pointer_cast(&cell.ray_idx[0]), cell.prpivot, thrust::raw_pointer_cast(&ray_occupancy[0])); rayfiltertimer.stop(); ctr.ray_filter_time += rayfiltertimer.get_ms(); Timer raysortimer("ray sort timer"); raysortimer.start(); thrust::sort_by_key(ray_occupancy.begin(), ray_occupancy.end(), cell.ray_idx.begin()); raysortimer.stop(); ctr.raysortbykey_time += raysortimer.get_ms(); dtemp_keys.resize(4); dtemp_values.resize(4); temp_keys.resize(4); temp_values.resize(4); Timer rayreduction("ray reduction timer"); rayreduction.start(); end = thrust::reduce_by_key(ray_occupancy.begin(), ray_occupancy.end(), ray_occupancy.begin(), dtemp_keys.begin(), dtemp_values.begin()); rayreduction.stop(); ctr.rayreduction_time += rayreduction.get_ms(); thrust::copy(dtemp_keys.begin(), dtemp_keys.end(), temp_keys.begin()); thrust::copy(dtemp_values.begin(), dtemp_values.end(), temp_values.begin()); num_types = end.first - dtemp_keys.begin(); int total_rays = 0; /// NOTE: We need to have a detailed ray splitting procedure similar to the triangle one because I envisage situations in which child boxes might not /// have any rays at all.! Possible scenario: secondary ray bounces? 
if(num_types == 4) { // LBRN branch bleft = bright = bboth = bnone = true; rleft = temp_values[0]; rboth = temp_values[1]/2; rright = temp_values[2]/3; rnone = temp_values[3]/4; } else if(num_types == 3) { if(temp_keys[0] == 1) { bleft = true; rleft = temp_values[0]; if(temp_keys[1] == 2) { // LB branch bboth = true; rboth = temp_values[1]/2; if(temp_keys[2] == 3) { // LBR branch bright = true; rright = temp_values[2]/3; } else if(temp_keys[2] == 4) { // LBN branch bnone = true; rnone = temp_values[2]/4; } } else if(temp_keys[1] == 3) { // left but not both, but right and none branch // LRN branch bright = true; rright = temp_values[1]/3; bnone = true; rnone = temp_values[2]/4; } } else if(temp_keys[0] == 2) { // Both branch // possible values are BRN bboth = bright = bnone = true; rboth = temp_values[0]/2; rright = temp_values[1]/3; rnone = temp_values[2]/4; } } else if(num_types == 2) { if(temp_keys[0] == 1) { // LB or LR or LN bleft = true; rleft = temp_values[0]; if(temp_keys[1] == 2) { bboth = true; rboth = temp_values[1]/2; } else if(temp_keys[1] == 3) { bright = true; rright = temp_values[1]/3; } else if(temp_keys[2] == 4) { bnone = true; rnone = temp_values[1]/4; } } else if(temp_keys[0] == 2) { // BR or BN bboth = true; rboth = temp_values[0]/2; if(temp_keys[1] == 3) { bright = true; rright = temp_values[1]/3; } else if(temp_keys[1] == 4) { bnone = true; rnone = temp_values[1]/4; } } else if(temp_keys[0] == 3) { // RN branch bright = bnone = true; rright = temp_values[0]/3; rnone = temp_values[1]/4; } } else if(num_types == 1) { if(temp_keys[0] == 1) { bleft = true; rleft = temp_values[0]; } else if(temp_keys[0] == 2) { bboth = true; rboth = temp_values[0]/2; } else if(temp_keys[0] == 3) { bright = true; rright = temp_values[0]/3; } else { bnone = true; rnone = temp_values[0]/4; } } // compute all rays total_rays = rleft + rboth + rright + rnone; if(total_rays != cell.prpivot) { std::cerr<<"Error in ray count; counter value : !!"<<debugctr<<"\n"; } 
leftraycnt = rleft + rboth; rightraycnt = rright + rboth; // the following code is to calculate the right rays indices to copy correctly if(bleft) { cell.rrpivot = rleft + 1; } else if(!bleft) { cell.rrpivot = 0; } else if(!bleft && !bboth) { if(bright) cell.rrpivot = 0; else cell.rrpivot = total_rays; // no rays come inside this box? } // check the conditions for both the left and right brute force children if(righttricnt < 256 || rightraycnt < 256) { //printf("Brute Cell : right tri cnt : %d Right ray cnt : %d\n", righttricnt, rightraycnt); /*BruteForceCell brutecell; brutecell.tricnt = righttricnt; brutecell.raycnt = rightraycnt; brutecell.triangle_idx.resize(righttricnt); brutecell.ray_idx.resize(rightraycnt); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), brutecell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, brutecell.ray_idx.begin()); bruteForceQueue.push(brutecell);*/ if((pack.ray_buffer_occupied + rightraycnt) < rtparams.BUFFER_SIZE && (pack.tri_buffer_occupied + righttricnt) < rtparams.BUFFER_SIZE && pack.num_segments < rtparams.MAX_SEGMENTS) { // replacing with segmented code Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = righttricnt; pack.ray_segment_sizes[pack.num_segments] = rightraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += rightraycnt; pack.tri_buffer_occupied += righttricnt; } else { // complete the brute force and reset stuff completeBruteForce(pack, d_triangles, rays, rtparams, ctr); 
Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = righttricnt; pack.ray_segment_sizes[pack.num_segments] = rightraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += rightraycnt; pack.tri_buffer_occupied += righttricnt; } } else { Cell rightcell; //printf("Right Tri cnt : %d Right Ray Cnt : %d\n", righttricnt, rightraycnt); rightcell.parent = cell.right; rightcell.ptpivot = righttricnt; rightcell.prpivot = rightraycnt; rightcell.triangle_idx.resize(righttricnt); rightcell.ray_idx.resize(rightraycnt); Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), rightcell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, rightcell.ray_idx.begin()); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); recursion_stack.push(rightcell); } if(lefttricnt < 256 || leftraycnt < 256) { //printf("Brute Cell : left tricnt : %d ; left raycnt : %d\n", lefttricnt, leftraycnt); /* BruteForceCell brutecell; brutecell.tricnt = lefttricnt; brutecell.raycnt = leftraycnt; brutecell.triangle_idx.resize(lefttricnt); brutecell.ray_idx.resize(leftraycnt); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, brutecell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, brutecell.ray_idx.begin()); bruteForceQueue.push(brutecell); */ if((pack.ray_buffer_occupied + leftraycnt) < rtparams.BUFFER_SIZE && (pack.tri_buffer_occupied + 
lefttricnt) < rtparams.BUFFER_SIZE && pack.num_segments < rtparams.MAX_SEGMENTS) { // replacing with segmented code Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = lefttricnt; pack.ray_segment_sizes[pack.num_segments] = leftraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += leftraycnt; pack.tri_buffer_occupied += lefttricnt; } else { // complete the brute force and reset stuff completeBruteForce(pack, d_triangles, rays, rtparams, ctr); Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = lefttricnt; pack.ray_segment_sizes[pack.num_segments] = leftraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += leftraycnt; pack.tri_buffer_occupied += lefttricnt; } } else { //printf("Left Tri cnt : %d ; Left Ray Cnt : %d\n", lefttricnt, leftraycnt); Cell leftcell; leftcell.parent = cell.left; leftcell.ptpivot = lefttricnt; leftcell.prpivot = leftraycnt; leftcell.triangle_idx.resize(lefttricnt); leftcell.ray_idx.resize(leftraycnt); Timer memcpytimer("mem copy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, 
leftcell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, leftcell.ray_idx.begin()); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); recursion_stack.push(leftcell); } // reset all values tleft = 0, tright = 0, tboth = 0; lefttricnt = 0, righttricnt = 0; rleft = 0, rright = 0, rboth = 0, rnone = 0; leftraycnt = 0, rightraycnt = 0; }while(!recursion_stack.empty()); // complete rendering of the brute force queue here if(pack.num_segments > 0) dacrtCompleteRender(pack, d_triangles, rays, rtparams, ctr); // complete the copy thrust::copy(dev_ray_maxts.begin(), dev_ray_maxts.end(), h_maxts); thrust::copy(dev_hitids.begin(), dev_hitids.end(), h_hitids); }
dbbb2ed4cae6c4117002bb6ce4a143695651d956.cu
#include <dacrt/dacrt.h> #include <util/cutimer.h> #include <util/util.h> extern "C" void dacrtCompleteRender(ParallelPack& pack, TriangleArray& dev_triangles, RayArray& dev_rays, DacrtRunTimeParameters& rtparams, Counters& ctr); extern "C" __global__ void updateMinKernel(int* ray_id, float* min_hits, int* minhit_ids, float* global_min, int* global_hits, int num_rays); extern "C" __global__ void segmentedBruteForce(RayArray rays, TriangleArray triangles, int* buffered_ray_ids, int ray_buffer_occupied, int* buffered_tri_ids, int tri_buffer_occupied, int* ray_segment_sizes, int* tri_segment_sizes, int* ray_segment_start, int* tri_segment_start, int num_segments, float* maxts, int* hitids, int num_threads_launched, int num_blocks_launched); /** CELL BASED VERSION ****************** Idea: If I can somehow combine the two sorting operations on both the left and right child of a single node, then I will be saving twice the savings in sort operations Expected Difficulties: How are we going to maintain state info across the nodes now? 
We need detailed plans TODO: Add detailed comments below before doing any operation */ struct Cell { AABB parent, left, right; int ptpivot, prpivot, ltpivot, rtpivot, lrpivot, rrpivot; // p*pivot => parent pivot; l*pivot=>left child ; r*pivot => right child thrust::device_vector<int> ray_idx; thrust::device_vector<int> triangle_idx; //thrust::device_vector<int> ray_occupancy; //thrust::device_vector<int> tri_occupancy; Cell() { ptpivot = prpivot = ltpivot = rtpivot = lrpivot = rrpivot = 0; } }; struct BruteForceCell { int tricnt, raycnt; thrust::device_vector<int> ray_idx; thrust::device_vector<int> triangle_idx; }; // cell filter kernels /// NOTE: Key idea here is to assign values so that we can effectively split the left and right in one pass /// So, left = 1, right = 3 and both = 2 __global__ void triCellFilterKernel(AABB left, AABB right, float3* v0, float3* v1, float3* v2, int* tri_idx, int num_tris, int* occupancy) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < num_tris) { /// NOTE : float3 lcentroid = (left.bmin + left.bmax) * 0.5f; float3 rcentroid = (right.bmin + right.bmax) * 0.5f; float3 lextents = left.bmax - left.bmin; float3 rextents = right.bmax - right.bmin; int triangle_id = tri_idx[tid]; float triverts[3][3] = {{v0[triangle_id].x, v0[triangle_id].y, v0[triangle_id].z}, {v1[triangle_id].x, v1[triangle_id].y, v1[triangle_id].z}, {v2[triangle_id].x, v2[triangle_id].y, v2[triangle_id].z}}; float lboxhalf[3] = {lextents.x * 0.5f, lextents.y * 0.5f, lextents.z * 0.5f}; float rboxhalf[3] = {rextents.x * 0.5f, rextents.y * 0.5f, rextents.z * 0.5f}; float lboxcenter[3] = {lcentroid.x, lcentroid.y, lcentroid.z}; float rboxcenter[3] = {rcentroid.x, rcentroid.y, rcentroid.z}; /// TODO: Can we replace this costly test with the simpler test? Any jump in total performance and not only this small step. 
int lo = triBoxOverlap(lboxcenter, lboxhalf, triverts); int ro = triBoxOverlap(rboxcenter, rboxhalf, triverts); //if(lo == 1 && ro == 1) printf("Hey : threadIdx : %d\n", tid); int val = lo && ro ? 2 : (lo ? 1 : 3); occupancy[tid] = val; } }; __global__ void rayCellFilterKernel(AABB left, AABB right, float3* o, float3* dir, int* ray_ids, int num_rays, int* occupancy) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < num_rays) { int ray_id = ray_ids[tid]; Ray ray(o[ray_id], dir[ray_id]); float thit; bool lo = left.rayIntersect(ray, thit); bool ro = right.rayIntersect(ray, thit); // left = 1, right = 3, both = 2 and no_hit = 4 int val = lo && ro ? 2 : (lo ? 1 : (ro ? 3 : 4)); occupancy[tid] = val; } } // we call this function to complete the parallel brute force. We've written this as a function call so that we can use it elsewhere extern "C" void completeBruteForce(ParallelPack& pack, TriangleArray& d_triangles, RayArray& d_rays, DacrtRunTimeParameters& rtparams, Counters& ctr) { thrust::device_vector<int> ray_segment_start(pack.num_segments); thrust::device_vector<int> tri_segment_start(pack.num_segments); thrust::exclusive_scan(pack.tri_segment_sizes.begin(), pack.tri_segment_sizes.begin() + pack.num_segments, tri_segment_start.begin()); thrust::exclusive_scan(pack.ray_segment_sizes.begin(), pack.ray_segment_sizes.begin() + pack.num_segments, ray_segment_start.begin()); int num_blocks = pack.num_segments; int num_threads_per_block = rtparams.NUM_RAYS_PER_BLOCK; Timer segtimer("Segmented timer"); segtimer.start(); segmentedBruteForce<<<num_blocks, num_threads_per_block>>>(d_rays, d_triangles, thrust::raw_pointer_cast(&pack.buffered_ray_idx[0]), pack.ray_buffer_occupied, thrust::raw_pointer_cast(&pack.buffered_tri_idx[0]), pack.tri_buffer_occupied, thrust::raw_pointer_cast(&pack.ray_segment_sizes[0]), thrust::raw_pointer_cast(&pack.tri_segment_sizes[0]), thrust::raw_pointer_cast(&ray_segment_start[0]), thrust::raw_pointer_cast(&tri_segment_start[0]), 
pack.num_segments, thrust::raw_pointer_cast(&pack.buffered_ray_maxts[0]), thrust::raw_pointer_cast(&pack.buffered_ray_hitids[0]), num_threads_per_block * num_blocks, num_blocks); segtimer.stop(); ctr.brute_force_time += segtimer.get_ms(); Timer segsort("Segmented SOrt"); segsort.start(); thrust::sort_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied, thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin()))); segsort.stop(); ctr.seg_sort_time += segsort.get_ms(); static thrust::device_vector<int> ray_idx(rtparams.BUFFER_SIZE); static thrust::device_vector<float> ray_maxts(rtparams.BUFFER_SIZE); static thrust::device_vector<int> ray_hitids(rtparams.BUFFER_SIZE); static thrust::equal_to<int> pred; typedef thrust::device_vector<int>::iterator iter; typedef thrust::device_vector<float>::iterator fiter; typedef thrust::zip_iterator<thrust::tuple<fiter, iter> > zippy; thrust::pair<iter, zippy> minend; MinHitFunctor<thrust::tuple<float, int> > min_hit_functor; Timer segred("Segmented Reduce"); segred.start(); minend = thrust::reduce_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied, thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin())), ray_idx.begin(), thrust::make_zip_iterator(thrust::make_tuple(ray_maxts.begin(), ray_hitids.begin())), pred, min_hit_functor); segred.stop(); ctr.reduction_time += segred.get_ms(); int num_valid_keys = minend.first - ray_idx.begin(); num_threads_per_block = 512; num_blocks = num_valid_keys / num_threads_per_block + (num_valid_keys % num_threads_per_block != 0); Timer update("update"); update.start(); updateMinKernel<<<num_blocks, num_threads_per_block>>>(thrust::raw_pointer_cast(&ray_idx[0]), thrust::raw_pointer_cast(&ray_maxts[0]), thrust::raw_pointer_cast(&ray_hitids[0]), thrust::raw_pointer_cast(&pack.dev_ray_maxts[0]), 
thrust::raw_pointer_cast(&pack.dev_hitids[0]), num_valid_keys); update.stop(); ctr.update_min_time += update.get_ms(); pack.buffered_ray_idx.clear(); pack.buffered_tri_idx.clear(); pack.tri_segment_sizes.clear(); pack.ray_segment_sizes.clear(); pack.buffered_ray_maxts.clear(); pack.buffered_ray_hitids.clear(); pack.segment_ids.clear(); pack.ray_buffer_occupied = 0; pack.tri_buffer_occupied = 0; pack.num_segments = 0; } void gpuDacrtCell(AABB& root, TriangleArray& d_triangles, int* tri_idx_array, int tpivot, RayArray& rays, int* ray_idx_array, int rpivot, float* h_maxts, int* h_hitids, DacrtRunTimeParameters& rtparams, Counters& ctr, Logger& logger) { std::stack<Cell> recursion_stack; std::queue<BruteForceCell> bruteForceQueue; Cell rootcell; rootcell.parent = root; rootcell.prpivot = rpivot; rootcell.ptpivot = tpivot; rootcell.ray_idx.resize(rpivot); rootcell.triangle_idx.resize(tpivot); thrust::copy(thrust::device_ptr<int>(tri_idx_array), thrust::device_ptr<int>(tri_idx_array) + tpivot, rootcell.triangle_idx.begin()); thrust::copy(thrust::device_ptr<int>(ray_idx_array), thrust::device_ptr<int>(ray_idx_array) + rpivot, rootcell.ray_idx.begin()); recursion_stack.push(rootcell); // we will use this vector throughout the iterative procedure thrust::device_vector<int> ray_occupancy; thrust::device_vector<int> tri_occupancy; int tleft = 0, tright = 0, tboth = 0; int rleft = 0, rright = 0, rboth = 0, rnone = 0; int lefttricnt = 0, righttricnt = 0; int leftraycnt = 0, rightraycnt = 0; // initialize the parallel pack thrust::device_vector<int> buffered_ray_idx(rtparams.BUFFER_SIZE); thrust::device_vector<int> buffered_tri_idx(rtparams.BUFFER_SIZE); thrust::device_vector<int> segment_ids(rtparams.MAX_SEGMENTS); thrust::device_vector<int> ray_segment_sizes(rtparams.MAX_SEGMENTS); thrust::device_vector<int> tri_segment_sizes(rtparams.MAX_SEGMENTS); thrust::device_vector<float> buffered_ray_maxts(rtparams.BUFFER_SIZE, FLT_MAX); thrust::device_vector<int> 
buffered_ray_hitids(rtparams.BUFFER_SIZE, -1); thrust::device_vector<float> dev_ray_maxts(rpivot, FLT_MAX); thrust::device_vector<int> dev_hitids(rpivot, -1); int ray_buffer_occupied = 0; int tri_buffer_occupied = 0; int num_segments = 0; int debugctr = 0; ParallelPack pack(buffered_ray_idx, buffered_tri_idx, segment_ids, ray_segment_sizes, tri_segment_sizes, buffered_ray_maxts, buffered_ray_hitids, dev_ray_maxts, dev_hitids, ray_buffer_occupied, tri_buffer_occupied, num_segments); do { Cell cell = recursion_stack.top(); recursion_stack.pop(); debugctr++; int NUM_THREADS_PER_BLOCK = rtparams.NUM_RAYS_PER_BLOCK; int NUM_BLOCKS = (cell.ptpivot / NUM_THREADS_PER_BLOCK) + (cell.ptpivot % NUM_THREADS_PER_BLOCK != 0); float split_pos; splitSpatialMedian(cell.parent, cell.left, cell.right, split_pos); // reallocate memory for ray and tri occupancy; ray_occupancy.resize(cell.prpivot); tri_occupancy.resize(cell.ptpivot); Timer trifiltertimer("tri filter timer"); trifiltertimer.start(); triCellFilterKernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(cell.left, cell.right, d_triangles.v0, d_triangles.v1, d_triangles.v2, thrust::raw_pointer_cast(&cell.triangle_idx[0]), cell.ptpivot, thrust::raw_pointer_cast(&tri_occupancy[0])); trifiltertimer.stop(); ctr.tri_filter_time += trifiltertimer.get_ms(); Timer trisorttimer("tri sort timer"); trisorttimer.start(); thrust::sort_by_key(tri_occupancy.begin(), tri_occupancy.end(), cell.triangle_idx.begin()); trisorttimer.stop(); ctr.trisortbykey_time += trisorttimer.get_ms(); std::vector<int> temp_keys(3); std::vector<int> temp_values(3); thrust::device_vector<int> dtemp_keys(3); thrust::device_vector<int> dtemp_values(3); //std::vector<int> debug(cell.ptpivot); //thrust::copy(tri_occupancy.begin(), tri_occupancy.end(), debug.begin()); thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> end; Timer triredtimer("tri red timer"); triredtimer.start(); end = thrust::reduce_by_key(tri_occupancy.begin(), 
tri_occupancy.end(), tri_occupancy.begin(), dtemp_keys.begin(), dtemp_values.begin()); triredtimer.stop(); ctr.trireduction_time += triredtimer.get_ms(); thrust::copy(dtemp_keys.begin(),dtemp_keys.end(), temp_keys.begin()); thrust::copy(dtemp_values.begin(), dtemp_values.end(), temp_values.begin()); int num_types = end.first - dtemp_keys.begin(); int total_tris = 0; bool bleft = false, bright = false, bboth = false; if(num_types == 3) { bleft = bright = bboth = true; tleft = temp_values[0]; tboth = temp_values[1]/2; tright = temp_values[2]/3; } else if(num_types == 2) { // first determine the keys // only two possible combos.!! if(temp_keys[0] == 1) { bleft = true; if(temp_keys[1] == 2) bboth = true; else bright = true; } else if(temp_keys[0] == 2) { bleft = false; bboth = true; bright = true; } if(bleft) { tleft = temp_values[0]; if(bboth) { tboth = temp_values[1]/2; } else if(bright) { tright = temp_values[1]/3; } } else if(bboth) { tboth = temp_values[0]/2; if(bright) { tright = temp_values[1]/3; } } } else if(num_types == 1) { if(temp_keys[0] == 1) bleft = true; else if(temp_keys[0] == 2) bboth = true; else bright = true; if(bleft) { tleft = temp_values[0]; } else if(bboth) { tboth = temp_values[0]/2; } else if(bright) { tright = temp_values[0]/3; } } total_tris = tleft + tright + tboth; if(total_tris != cell.ptpivot) { std::cerr<<"Error in triangle count ; counter value : "<<debugctr<<"\n"; } lefttricnt = tleft + tboth; righttricnt = tright + tboth; if(bleft && bboth) { cell.ltpivot = tleft + tboth; cell.rtpivot = tleft + 1; // even if we dont have separate right values, both counts as right. So this is perfectly valid. } else if(!bleft && bboth) { cell.ltpivot = tboth; cell.rtpivot = 0; } else if(bleft && !bboth) { // we have got only left triangles and no common ones. we have to check if we have right triangles only.!! cell.ltpivot = tleft; if(bright) cell.rtpivot = tleft + 1; else cell.rtpivot = total_tris; // set to the end of the list.! 
} else if(!bleft && !bboth) { cell.ltpivot = 0; if(bright) cell.rtpivot = 0; // this is the only way we can do this. else cell.rtpivot = total_tris; } // ray stuff /** The arrangement of ray data LLLLLLLLLLLLLL - BBBBBBBB - RRRRRRRRRRR - NNNNNNNNNNNNNN ^ ^ ^ | | | right left right start end end */ // rest all the boolean values here bleft = bright = bboth = false; bool bnone = false; NUM_BLOCKS = (cell.prpivot / NUM_THREADS_PER_BLOCK) + (cell.prpivot % NUM_THREADS_PER_BLOCK != 0); Timer rayfiltertimer("ray filter timer"); rayfiltertimer.start(); rayCellFilterKernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(cell.left, cell.right, rays.o, rays.d, thrust::raw_pointer_cast(&cell.ray_idx[0]), cell.prpivot, thrust::raw_pointer_cast(&ray_occupancy[0])); rayfiltertimer.stop(); ctr.ray_filter_time += rayfiltertimer.get_ms(); Timer raysortimer("ray sort timer"); raysortimer.start(); thrust::sort_by_key(ray_occupancy.begin(), ray_occupancy.end(), cell.ray_idx.begin()); raysortimer.stop(); ctr.raysortbykey_time += raysortimer.get_ms(); dtemp_keys.resize(4); dtemp_values.resize(4); temp_keys.resize(4); temp_values.resize(4); Timer rayreduction("ray reduction timer"); rayreduction.start(); end = thrust::reduce_by_key(ray_occupancy.begin(), ray_occupancy.end(), ray_occupancy.begin(), dtemp_keys.begin(), dtemp_values.begin()); rayreduction.stop(); ctr.rayreduction_time += rayreduction.get_ms(); thrust::copy(dtemp_keys.begin(), dtemp_keys.end(), temp_keys.begin()); thrust::copy(dtemp_values.begin(), dtemp_values.end(), temp_values.begin()); num_types = end.first - dtemp_keys.begin(); int total_rays = 0; /// NOTE: We need to have a detailed ray splitting procedure similar to the triangle one because I envisage situations in which child boxes might not /// have any rays at all.! Possible scenario: secondary ray bounces? 
if(num_types == 4) { // LBRN branch bleft = bright = bboth = bnone = true; rleft = temp_values[0]; rboth = temp_values[1]/2; rright = temp_values[2]/3; rnone = temp_values[3]/4; } else if(num_types == 3) { if(temp_keys[0] == 1) { bleft = true; rleft = temp_values[0]; if(temp_keys[1] == 2) { // LB branch bboth = true; rboth = temp_values[1]/2; if(temp_keys[2] == 3) { // LBR branch bright = true; rright = temp_values[2]/3; } else if(temp_keys[2] == 4) { // LBN branch bnone = true; rnone = temp_values[2]/4; } } else if(temp_keys[1] == 3) { // left but not both, but right and none branch // LRN branch bright = true; rright = temp_values[1]/3; bnone = true; rnone = temp_values[2]/4; } } else if(temp_keys[0] == 2) { // Both branch // possible values are BRN bboth = bright = bnone = true; rboth = temp_values[0]/2; rright = temp_values[1]/3; rnone = temp_values[2]/4; } } else if(num_types == 2) { if(temp_keys[0] == 1) { // LB or LR or LN bleft = true; rleft = temp_values[0]; if(temp_keys[1] == 2) { bboth = true; rboth = temp_values[1]/2; } else if(temp_keys[1] == 3) { bright = true; rright = temp_values[1]/3; } else if(temp_keys[2] == 4) { bnone = true; rnone = temp_values[1]/4; } } else if(temp_keys[0] == 2) { // BR or BN bboth = true; rboth = temp_values[0]/2; if(temp_keys[1] == 3) { bright = true; rright = temp_values[1]/3; } else if(temp_keys[1] == 4) { bnone = true; rnone = temp_values[1]/4; } } else if(temp_keys[0] == 3) { // RN branch bright = bnone = true; rright = temp_values[0]/3; rnone = temp_values[1]/4; } } else if(num_types == 1) { if(temp_keys[0] == 1) { bleft = true; rleft = temp_values[0]; } else if(temp_keys[0] == 2) { bboth = true; rboth = temp_values[0]/2; } else if(temp_keys[0] == 3) { bright = true; rright = temp_values[0]/3; } else { bnone = true; rnone = temp_values[0]/4; } } // compute all rays total_rays = rleft + rboth + rright + rnone; if(total_rays != cell.prpivot) { std::cerr<<"Error in ray count; counter value : !!"<<debugctr<<"\n"; } 
leftraycnt = rleft + rboth; rightraycnt = rright + rboth; // the following code is to calculate the right rays indices to copy correctly if(bleft) { cell.rrpivot = rleft + 1; } else if(!bleft) { cell.rrpivot = 0; } else if(!bleft && !bboth) { if(bright) cell.rrpivot = 0; else cell.rrpivot = total_rays; // no rays come inside this box? } // check the conditions for both the left and right brute force children if(righttricnt < 256 || rightraycnt < 256) { //printf("Brute Cell : right tri cnt : %d Right ray cnt : %d\n", righttricnt, rightraycnt); /*BruteForceCell brutecell; brutecell.tricnt = righttricnt; brutecell.raycnt = rightraycnt; brutecell.triangle_idx.resize(righttricnt); brutecell.ray_idx.resize(rightraycnt); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), brutecell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, brutecell.ray_idx.begin()); bruteForceQueue.push(brutecell);*/ if((pack.ray_buffer_occupied + rightraycnt) < rtparams.BUFFER_SIZE && (pack.tri_buffer_occupied + righttricnt) < rtparams.BUFFER_SIZE && pack.num_segments < rtparams.MAX_SEGMENTS) { // replacing with segmented code Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = righttricnt; pack.ray_segment_sizes[pack.num_segments] = rightraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += rightraycnt; pack.tri_buffer_occupied += righttricnt; } else { // complete the brute force and reset stuff completeBruteForce(pack, d_triangles, rays, rtparams, ctr); 
Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = righttricnt; pack.ray_segment_sizes[pack.num_segments] = rightraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += rightraycnt; pack.tri_buffer_occupied += righttricnt; } } else { Cell rightcell; //printf("Right Tri cnt : %d Right Ray Cnt : %d\n", righttricnt, rightraycnt); rightcell.parent = cell.right; rightcell.ptpivot = righttricnt; rightcell.prpivot = rightraycnt; rightcell.triangle_idx.resize(righttricnt); rightcell.ray_idx.resize(rightraycnt); Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin() + cell.rtpivot, cell.triangle_idx.end(), rightcell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin() + cell.rrpivot, cell.ray_idx.end() - rnone, rightcell.ray_idx.begin()); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); recursion_stack.push(rightcell); } if(lefttricnt < 256 || leftraycnt < 256) { //printf("Brute Cell : left tricnt : %d ; left raycnt : %d\n", lefttricnt, leftraycnt); /* BruteForceCell brutecell; brutecell.tricnt = lefttricnt; brutecell.raycnt = leftraycnt; brutecell.triangle_idx.resize(lefttricnt); brutecell.ray_idx.resize(leftraycnt); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, brutecell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, brutecell.ray_idx.begin()); bruteForceQueue.push(brutecell); */ if((pack.ray_buffer_occupied + leftraycnt) < rtparams.BUFFER_SIZE && (pack.tri_buffer_occupied + 
lefttricnt) < rtparams.BUFFER_SIZE && pack.num_segments < rtparams.MAX_SEGMENTS) { // replacing with segmented code Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = lefttricnt; pack.ray_segment_sizes[pack.num_segments] = leftraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += leftraycnt; pack.tri_buffer_occupied += lefttricnt; } else { // complete the brute force and reset stuff completeBruteForce(pack, d_triangles, rays, rtparams, ctr); Timer memcpytimer("mem cpy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, pack.buffered_tri_idx.begin() + pack.tri_buffer_occupied); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); pack.tri_segment_sizes[pack.num_segments] = lefttricnt; pack.ray_segment_sizes[pack.num_segments] = leftraycnt; pack.segment_ids[pack.num_segments] = pack.num_segments; pack.num_segments++; // increment the count pack.ray_buffer_occupied += leftraycnt; pack.tri_buffer_occupied += lefttricnt; } } else { //printf("Left Tri cnt : %d ; Left Ray Cnt : %d\n", lefttricnt, leftraycnt); Cell leftcell; leftcell.parent = cell.left; leftcell.ptpivot = lefttricnt; leftcell.prpivot = leftraycnt; leftcell.triangle_idx.resize(lefttricnt); leftcell.ray_idx.resize(leftraycnt); Timer memcpytimer("mem copy timer"); memcpytimer.start(); thrust::copy(cell.triangle_idx.begin(), cell.triangle_idx.begin() + lefttricnt, 
leftcell.triangle_idx.begin()); thrust::copy(cell.ray_idx.begin(), cell.ray_idx.begin() + leftraycnt, leftcell.ray_idx.begin()); memcpytimer.stop(); ctr.mem_cpy_time += memcpytimer.get_ms(); recursion_stack.push(leftcell); } // reset all values tleft = 0, tright = 0, tboth = 0; lefttricnt = 0, righttricnt = 0; rleft = 0, rright = 0, rboth = 0, rnone = 0; leftraycnt = 0, rightraycnt = 0; }while(!recursion_stack.empty()); // complete rendering of the brute force queue here if(pack.num_segments > 0) dacrtCompleteRender(pack, d_triangles, rays, rtparams, ctr); // complete the copy thrust::copy(dev_ray_maxts.begin(), dev_ray_maxts.end(), h_maxts); thrust::copy(dev_hitids.begin(), dev_hitids.end(), h_hitids); }
ead53196ebab7ce10a028ae6c65ccf7028ca4c52.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> #include "tpcc_table.h" #include "tx.h" #include "utility.h" #include "table_operator.h" #define MAX_PRINT_ORDER_LINE 1000 //test for order_status transaction!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! __device__ void tx_order_status(){ // long os_C_W_ID=1; long os_C_D_ID=3; long os_C_ID=3; //customer ID // printf("Order Status Transaction is start!\n"); //customersC_W_ID,C_D_ID,C_ID struct customer customer_tmp; struct customer *p_customer_tmp; int rid_in_customers=0; while(1){ rid_in_customers = table_scan(CUSTOMER, LONG, sizeof(long int), ((long)&(customer_tmp.C_W_ID)-(long)&(customer_tmp.C_ID)), EQ, &os_C_W_ID, rid_in_customers); //customerC_W_IDos_C_W_ID if(rid_in_customers == -1){ //customersos_C_W_ID printf("Transaction Exception:\tC_W_ID is not in customer table!\n"); return; } p_customer_tmp=(struct customer *)get(CUSTOMER, rid_in_customers); //os_C_W_ID if(p_customer_tmp->C_D_ID == os_C_D_ID && p_customer_tmp->C_ID == os_C_ID){ //customersos_C_W_ID,os_C_D_ID,os_C_ID break; }else{ rid_in_customers++; } } printf("rid_in_customers:%d\tC_W_ID:%ld\tC_D_ID:%ld\tC_ID:%ld\tC_FIRST:%s\tC_MIDDLE:%s\tC_LAST:%s\tC_BALANCE:%lf\n", rid_in_customers, p_customer_tmp->C_W_ID, p_customer_tmp->C_D_ID, p_customer_tmp->C_ID, p_customer_tmp->C_FIRST, // p_customer_tmp->C_MIDDLE, p_customer_tmp->C_LAST, p_customer_tmp->C_BALANCE); // //ordersO_W_IDO_D_IDO_C_IDO_ID struct order order_tmp; struct order *p_order_tmp; int rid_in_orders=0; int os_rid_in_orders_max=-1; long os_O_ID_max=-1; while(1){ rid_in_orders = table_scan(ORDER, LONG, sizeof(long int), ((long)&(order_tmp.O_W_ID)-(long)&(order_tmp.O_ID)), EQ, &os_C_W_ID, rid_in_orders); //orderO_W_IDos_C_W_ID if(rid_in_orders == -1){ break; } p_order_tmp=(struct order *)get(ORDER, rid_in_orders); //os_C_W_ID if(p_order_tmp->O_D_ID == os_C_D_ID && p_order_tmp->O_C_ID == os_C_ID && p_order_tmp->O_ID 
> os_O_ID_max){ os_rid_in_orders_max=rid_in_orders; os_O_ID_max=p_order_tmp->O_ID; rid_in_orders++; }else{ rid_in_orders++; } } if(os_rid_in_orders_max == -1){ //orders(os_C_W_IDos_C_D_IDos_C_ID) printf("Transaction Exception:\thave not found right record in order table!\n"); return; }else{ p_order_tmp=(struct order *)get(ORDER, os_rid_in_orders_max); printf("rid_in_orders:%d\tO_ID:%ld\tO_ENTRY_DATE:%ld\tO_CARRIER_ID:%ld\n", os_rid_in_orders_max, p_order_tmp->O_ID, // p_order_tmp->O_ENTRY_DATE, // p_order_tmp->O_CARRIER_ID); // } //orderlinesOL_W_IDOL_D_IDOL_O_ID struct order_line orderline_tmp; struct order_line *p_orderline_tmp; int rid_in_orderlines=0; struct order_line orderline_arr[MAX_PRINT_ORDER_LINE]; int count=0; while(1){ rid_in_orderlines = table_scan(ORDER_LINE, LONG, sizeof(long int), ((long)&(orderline_tmp.OL_W_ID)-(long)&(orderline_tmp.OL_O_ID)), EQ, &os_C_W_ID, rid_in_orderlines); //orderlinesC_W_IDos_C_W_ID if(rid_in_orderlines == -1){ break; } p_orderline_tmp=(struct order_line *)get(ORDER_LINE, rid_in_orderlines); //os_C_W_ID if(p_orderline_tmp->OL_D_ID == os_C_D_ID && p_orderline_tmp->OL_O_ID == p_order_tmp->O_ID){ d_memcpy(&(orderline_arr[count]),p_orderline_tmp,sizeof(struct order_line)); count++; if(count==MAX_PRINT_ORDER_LINE){ break; } rid_in_orderlines++; }else{ rid_in_orderlines++; } } if(count==0){ printf("Transaction Exception:\thave not found right record in orderline table!\n"); return; }else{ int i,j,k; for(i=0;i<count;i++){ // k=i; for(j=i+1;j<count;j++){ if(orderline_arr[j].OL_I_ID>orderline_arr[k].OL_I_ID){ k=j; } } if(i!=k){ d_memcpy(&orderline_tmp,&orderline_arr[i],sizeof(struct order_line)); d_memcpy(&orderline_arr[i],&orderline_arr[k],sizeof(struct order_line)); d_memcpy(&orderline_arr[k],&orderline_tmp,sizeof(struct order_line)); } printf("OL_I_ID:%ld\tOL_SUPPLY_W_ID:%ld\tOL_QUANTITY:%lf\tOL_AMOUNT:%lf\tOL_DELIVERY_D:%ld\n", orderline_arr[i].OL_I_ID, orderline_arr[i].OL_SUPPLY_W_ID, // orderline_arr[i].OL_QUANTITY, // 
orderline_arr[i].OL_AMOUNT, // orderline_arr[i].OL_DELIVERY_D); // } } // printf("Order Status Transaction is finished!\n"); }
ead53196ebab7ce10a028ae6c65ccf7028ca4c52.cu
#include <stdio.h> #include <curand_kernel.h> #include <cuda_runtime.h> #include "tpcc_table.h" #include "tx.h" #include "utility.h" #include "table_operator.h" #define MAX_PRINT_ORDER_LINE 1000 //test for order_status transaction!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! __device__ void tx_order_status(){ //输入的数据 long os_C_W_ID=1; long os_C_D_ID=3; long os_C_ID=3; //customer ID //事务启动 printf("Order Status Transaction is start!\n"); //在客户表(customers)中查询客户代码(C_W_ID,C_D_ID,C_ID)。 struct customer customer_tmp; struct customer *p_customer_tmp; int rid_in_customers=0; while(1){ rid_in_customers = table_scan(CUSTOMER, LONG, sizeof(long int), ((long)&(customer_tmp.C_W_ID)-(long)&(customer_tmp.C_ID)), EQ, &os_C_W_ID, rid_in_customers); //扫描整张customer表找出C_W_ID为os_C_W_ID的记录号 if(rid_in_customers == -1){ //customers中没有os_C_W_ID对应的记录 printf("Transaction Exception:\tC_W_ID is not in customer table!\n"); return; } p_customer_tmp=(struct customer *)get(CUSTOMER, rid_in_customers); //获取os_C_W_ID对应的记录 if(p_customer_tmp->C_D_ID == os_C_D_ID && p_customer_tmp->C_ID == os_C_ID){ //customers中有os_C_W_ID,os_C_D_ID,os_C_ID对应的记录 break; }else{ rid_in_customers++; } } printf("rid_in_customers:%d\tC_W_ID:%ld\tC_D_ID:%ld\tC_ID:%ld\tC_FIRST:%s\tC_MIDDLE:%s\tC_LAST:%s\tC_BALANCE:%lf\n", rid_in_customers, p_customer_tmp->C_W_ID, p_customer_tmp->C_D_ID, p_customer_tmp->C_ID, p_customer_tmp->C_FIRST, //取姓名 p_customer_tmp->C_MIDDLE, p_customer_tmp->C_LAST, p_customer_tmp->C_BALANCE); //欠款余额 //在定单表(orders)中查询仓库代码(O_W_ID)、地区代码(O_D_ID)、客户代码(O_C_ID),并且定单代码(O_ID)最大的记录。 struct order order_tmp; struct order *p_order_tmp; int rid_in_orders=0; int os_rid_in_orders_max=-1; long os_O_ID_max=-1; while(1){ rid_in_orders = table_scan(ORDER, LONG, sizeof(long int), ((long)&(order_tmp.O_W_ID)-(long)&(order_tmp.O_ID)), EQ, &os_C_W_ID, rid_in_orders); //扫描整张order表找出O_W_ID为os_C_W_ID的记录号 if(rid_in_orders == -1){ break; } p_order_tmp=(struct order *)get(ORDER, rid_in_orders); //获取os_C_W_ID对应的记录 if(p_order_tmp->O_D_ID == 
os_C_D_ID && p_order_tmp->O_C_ID == os_C_ID && p_order_tmp->O_ID > os_O_ID_max){ os_rid_in_orders_max=rid_in_orders; os_O_ID_max=p_order_tmp->O_ID; rid_in_orders++; }else{ rid_in_orders++; } } if(os_rid_in_orders_max == -1){ //orders中没有(os_C_W_ID、os_C_D_ID、os_C_ID)对应的记录 printf("Transaction Exception:\thave not found right record in order table!\n"); return; }else{ p_order_tmp=(struct order *)get(ORDER, os_rid_in_orders_max); printf("rid_in_orders:%d\tO_ID:%ld\tO_ENTRY_DATE:%ld\tO_CARRIER_ID:%ld\n", os_rid_in_orders_max, p_order_tmp->O_ID, //取定单代码 p_order_tmp->O_ENTRY_DATE, //制单日期 p_order_tmp->O_CARRIER_ID); //货运代码 } //在定单分录表(orderlines)中查询满足仓库代码(OL_W_ID)、地区代码(OL_D_ID)、定单代码(OL_O_ID)条件的所有记录。 struct order_line orderline_tmp; struct order_line *p_orderline_tmp; int rid_in_orderlines=0; struct order_line orderline_arr[MAX_PRINT_ORDER_LINE]; int count=0; while(1){ rid_in_orderlines = table_scan(ORDER_LINE, LONG, sizeof(long int), ((long)&(orderline_tmp.OL_W_ID)-(long)&(orderline_tmp.OL_O_ID)), EQ, &os_C_W_ID, rid_in_orderlines); //扫描整张orderlines表找出C_W_ID为os_C_W_ID的记录号 if(rid_in_orderlines == -1){ break; } p_orderline_tmp=(struct order_line *)get(ORDER_LINE, rid_in_orderlines); //获取os_C_W_ID对应的记录 if(p_orderline_tmp->OL_D_ID == os_C_D_ID && p_orderline_tmp->OL_O_ID == p_order_tmp->O_ID){ d_memcpy(&(orderline_arr[count]),p_orderline_tmp,sizeof(struct order_line)); count++; if(count==MAX_PRINT_ORDER_LINE){ break; } rid_in_orderlines++; }else{ rid_in_orderlines++; } } if(count==0){ printf("Transaction Exception:\thave not found right record in orderline table!\n"); return; }else{ int i,j,k; for(i=0;i<count;i++){ //根据商品代码排序并输出 k=i; for(j=i+1;j<count;j++){ if(orderline_arr[j].OL_I_ID>orderline_arr[k].OL_I_ID){ k=j; } } if(i!=k){ d_memcpy(&orderline_tmp,&orderline_arr[i],sizeof(struct order_line)); d_memcpy(&orderline_arr[i],&orderline_arr[k],sizeof(struct order_line)); d_memcpy(&orderline_arr[k],&orderline_tmp,sizeof(struct order_line)); } 
printf("OL_I_ID:%ld\tOL_SUPPLY_W_ID:%ld\tOL_QUANTITY:%lf\tOL_AMOUNT:%lf\tOL_DELIVERY_D:%ld\n", orderline_arr[i].OL_I_ID, orderline_arr[i].OL_SUPPLY_W_ID, //供应仓库代码 orderline_arr[i].OL_QUANTITY, //数量 orderline_arr[i].OL_AMOUNT, //金额 orderline_arr[i].OL_DELIVERY_D); //发货时间 } } //事务提交 printf("Order Status Transaction is finished!\n"); }
37f1ccbb713694ab8e55306871f161d95dce4a13.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> extern "C" { #include "grid.h" } extern "C" OTJ_Grid createDeviceGrid(OTJ_Grid host_grid) { OTJ_Grid device_grid = host_grid; hipMalloc(&device_grid.internal_storage, OTJ_Grid_Size(host_grid)); return device_grid; } extern "C" OTJ_Grid createAndCopyDeviceGrid(OTJ_Grid host_grid) { OTJ_Grid device_grid = createDeviceGrid(host_grid); hipMemcpy(device_grid.internal_storage, host_grid.internal_storage, OTJ_Grid_Size(device_grid), hipMemcpyHostToDevice); return device_grid; } extern "C" void retrieveDeviceGrid(OTJ_Grid host_grid, OTJ_Grid device_grid) { hipMemcpy(host_grid.internal_storage, device_grid.internal_storage, OTJ_Grid_Size(device_grid), hipMemcpyDeviceToHost); }
37f1ccbb713694ab8e55306871f161d95dce4a13.cu
#include <cuda.h> extern "C" { #include "grid.h" } extern "C" OTJ_Grid createDeviceGrid(OTJ_Grid host_grid) { OTJ_Grid device_grid = host_grid; cudaMalloc(&device_grid.internal_storage, OTJ_Grid_Size(host_grid)); return device_grid; } extern "C" OTJ_Grid createAndCopyDeviceGrid(OTJ_Grid host_grid) { OTJ_Grid device_grid = createDeviceGrid(host_grid); cudaMemcpy(device_grid.internal_storage, host_grid.internal_storage, OTJ_Grid_Size(device_grid), cudaMemcpyHostToDevice); return device_grid; } extern "C" void retrieveDeviceGrid(OTJ_Grid host_grid, OTJ_Grid device_grid) { cudaMemcpy(host_grid.internal_storage, device_grid.internal_storage, OTJ_Grid_Size(device_grid), cudaMemcpyDeviceToHost); }
61e0cfaa749d28bab337cacf6066dc36c9e2ae3f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> //#include <cutil_inline.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <iostream> #include <fstream> #define ASIZE 256 #define DATA_SIZE 1024 __device__ int shifts[ASIZE]; __device__ int results[DATA_SIZE]; __global__ void processPattern(char* x ,int m, int shifts[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= m ) return; char c = x[idx]; for( int i = m - 1; i >= idx; --i ) { if ( x[i] == c ) {// match is found shifts[c] = m - i; return; } } } __global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx > (n - m) ) return; if ( indx[idx] != idx ) return; unsigned int yes = 1; for( int i = 0; i < m; ++i ) { // try to match the string if ( x[i] != y[idx + i] ) { yes = 0; break; } } results[idx] = yes; } void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) { int j = 0; int limit = n - m; while (j <= limit ) { j += shifts[ y[j + m] ]; indx[j] = j; } } void display_results(int n, int results[]) { int j = 0; int flag =0; for( int i =0; i < n; ++i ) if ( results[i] == 1 ) { printf("%d. 
Found match at %d\n",j++, i); flag=1; } if(flag==0) printf("Not found\n"); } int main(int argc, char* argv[]) { srand(time(NULL)); char values[] = "ACGT"; int cuda_device = 0; int n = 10000000; // length of main string int m = 100; // length of substring char* mainString = (char*)malloc(n * sizeof(char)); char* subString = (char*)malloc(m * sizeof(char)); for(int i=0;i < n;i++) { mainString[i] = values[rand()%4]; } for(int i=0;i < m;i++) { subString[i] = values[rand()%4]; } // // Initialize the shift and index array // int* l_shifts = (int*)malloc( ASIZE * sizeof(int) ); for( int i = 0; i < ASIZE; ++i ) l_shifts[i] = m + 1; int* l_indx = (int*) malloc( n * sizeof(int) ); for( int i = 0; i < n; ++i ) l_indx[i] = -1; hipError_t error; hipEvent_t start_event, stop_event; float time; float time2; // initializing the GPU timers hipEventCreate(&start_event); hipEventCreate(&stop_event); // // Allocate global memory to host the pattern, text and other supporting data // structures // char* d_substr = 0; int* d_shifts = 0; int* d_indx = 0; char* d_text = 0; int *d_results = 0,*l_results=(int*) malloc( n * sizeof(int) ); for( int i = 0; i < n; ++i ) l_results[i] = 0; //hipGetSymbolAddress((void**)&d_shifts, "shifts"); hipMalloc((void**)&d_results, n * sizeof(int)) ; hipMalloc((void**)&d_shifts, sizeof(int) * ASIZE) ; //error = hipGetLastError(); //printf("Error1: %s\n", hipGetErrorString(error)); hipMalloc((void**)&d_indx, n * sizeof(int)) ; //error = hipGetLastError(); //printf("Error2: %s\n", hipGetErrorString(error)); hipMalloc((void**)&d_substr, (m + 1)*sizeof(char)) ; //error = hipGetLastError(); //printf("Error3: %s\n", hipGetErrorString(error)); hipMalloc((void**)&d_text, (strlen(mainString)+1)*sizeof(char)) ; //error = hipGetLastError(); //printf("Error4: %s\n", hipGetErrorString(error)); hipMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, hipMemcpyHostToDevice ) ; hipMemcpy(d_results, l_results, sizeof(int) * n, hipMemcpyHostToDevice ) ; //error = hipGetLastError(); 
//printf("Error5: %s\n", hipGetErrorString(error)); hipMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)+1), hipMemcpyHostToDevice ) ; //error = hipGetLastError(); //printf("Error6: %s\n", hipGetErrorString(error)); hipMemcpy(d_substr, subString, sizeof(char)*(strlen(subString)+1), hipMemcpyHostToDevice) ; //error = hipGetLastError(); //printf("Error7: %s\n", hipGetErrorString(error)); // // Pre-process the pattern to be matched // dim3 threadsPerBlocks(ASIZE, 1); int t = m / threadsPerBlocks.x; int t1 = m % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks(t, 1); printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x); hipEventRecord(start_event, 0); hipLaunchKernelGGL(( processPattern), dim3(numBlocks),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_shifts); hipDeviceSynchronize(); hipEventRecord(stop_event, 0); hipEventSynchronize( stop_event ); hipEventElapsedTime( &time, start_event, stop_event ); hipMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, hipMemcpyDeviceToHost ) ; //error = hipGetLastError(); //printf("Error8: %s\n", hipGetErrorString(error)); // // Transfer the pre-computed shift indexes from host to device memory // hipMemcpy(l_shifts, d_shifts, ASIZE * sizeof(int), hipMemcpyDeviceToHost) ; precomputeShiftIndx(mainString , n, m, l_shifts, l_indx); hipMemcpy(d_shifts, l_shifts, ASIZE * sizeof(int), hipMemcpyHostToDevice) ; hipMemcpy(d_indx, l_indx, n * sizeof(int), hipMemcpyHostToDevice) ; //error = hipGetLastError(); //printf("Error9: %s\n", hipGetErrorString(error)); // // Perform the actual search // t = n / threadsPerBlocks.x; t1 = n % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks2(t, 1); printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x); hipEventRecord(start_event, 0); hipLaunchKernelGGL(( search), dim3(numBlocks2),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_text, n, d_shifts, d_indx,d_results); hipDeviceSynchronize(); 
hipEventRecord(stop_event, 0); hipEventSynchronize( stop_event ); hipEventElapsedTime( &time2, start_event, stop_event ); hipEventDestroy( start_event ); // cleanup hipEventDestroy( stop_event ); // cleanup printf("done and it took: %f+%f=%f milliseconds\n",time, time2, time+time2); //hipGetSymbolAddress((void**)&d_results, "results"); //hipMalloc((void**)&d_results, n * sizeof(int)) ; //int* l_results = (int*) malloc( n * sizeof(int) ); hipMemcpy(l_results, d_results, n * sizeof(int), hipMemcpyDeviceToHost) ; display_results(n, l_results); //error = hipGetLastError(); //printf("Error10: %s\n", hipGetErrorString(error)); hipFree(d_substr); hipFree(d_shifts); hipFree(d_indx); hipFree(d_text); free(mainString); free(subString); free(l_indx); free(l_shifts); free(l_results); hipDeviceReset(); return 0; }
61e0cfaa749d28bab337cacf6066dc36c9e2ae3f.cu
#include <cuda.h> //#include <cutil_inline.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <iostream> #include <fstream> #define ASIZE 256 #define DATA_SIZE 1024 __device__ int shifts[ASIZE]; __device__ int results[DATA_SIZE]; __global__ void processPattern(char* x ,int m, int shifts[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= m ) return; char c = x[idx]; for( int i = m - 1; i >= idx; --i ) { if ( x[i] == c ) {// match is found shifts[c] = m - i; return; } } } __global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx > (n - m) ) return; if ( indx[idx] != idx ) return; unsigned int yes = 1; for( int i = 0; i < m; ++i ) { // try to match the string if ( x[i] != y[idx + i] ) { yes = 0; break; } } results[idx] = yes; } void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) { int j = 0; int limit = n - m; while (j <= limit ) { j += shifts[ y[j + m] ]; indx[j] = j; } } void display_results(int n, int results[]) { int j = 0; int flag =0; for( int i =0; i < n; ++i ) if ( results[i] == 1 ) { printf("%d. 
Found match at %d\n",j++, i); flag=1; } if(flag==0) printf("Not found\n"); } int main(int argc, char* argv[]) { srand(time(NULL)); char values[] = "ACGT"; int cuda_device = 0; int n = 10000000; // length of main string int m = 100; // length of substring char* mainString = (char*)malloc(n * sizeof(char)); char* subString = (char*)malloc(m * sizeof(char)); for(int i=0;i < n;i++) { mainString[i] = values[rand()%4]; } for(int i=0;i < m;i++) { subString[i] = values[rand()%4]; } // // Initialize the shift and index array // int* l_shifts = (int*)malloc( ASIZE * sizeof(int) ); for( int i = 0; i < ASIZE; ++i ) l_shifts[i] = m + 1; int* l_indx = (int*) malloc( n * sizeof(int) ); for( int i = 0; i < n; ++i ) l_indx[i] = -1; cudaError_t error; cudaEvent_t start_event, stop_event; float time; float time2; // initializing the GPU timers cudaEventCreate(&start_event); cudaEventCreate(&stop_event); // // Allocate global memory to host the pattern, text and other supporting data // structures // char* d_substr = 0; int* d_shifts = 0; int* d_indx = 0; char* d_text = 0; int *d_results = 0,*l_results=(int*) malloc( n * sizeof(int) ); for( int i = 0; i < n; ++i ) l_results[i] = 0; //cudaGetSymbolAddress((void**)&d_shifts, "shifts"); cudaMalloc((void**)&d_results, n * sizeof(int)) ; cudaMalloc((void**)&d_shifts, sizeof(int) * ASIZE) ; //error = cudaGetLastError(); //printf("Error1: %s\n", cudaGetErrorString(error)); cudaMalloc((void**)&d_indx, n * sizeof(int)) ; //error = cudaGetLastError(); //printf("Error2: %s\n", cudaGetErrorString(error)); cudaMalloc((void**)&d_substr, (m + 1)*sizeof(char)) ; //error = cudaGetLastError(); //printf("Error3: %s\n", cudaGetErrorString(error)); cudaMalloc((void**)&d_text, (strlen(mainString)+1)*sizeof(char)) ; //error = cudaGetLastError(); //printf("Error4: %s\n", cudaGetErrorString(error)); cudaMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, cudaMemcpyHostToDevice ) ; cudaMemcpy(d_results, l_results, sizeof(int) * n, cudaMemcpyHostToDevice ) ; 
//error = cudaGetLastError(); //printf("Error5: %s\n", cudaGetErrorString(error)); cudaMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)+1), cudaMemcpyHostToDevice ) ; //error = cudaGetLastError(); //printf("Error6: %s\n", cudaGetErrorString(error)); cudaMemcpy(d_substr, subString, sizeof(char)*(strlen(subString)+1), cudaMemcpyHostToDevice) ; //error = cudaGetLastError(); //printf("Error7: %s\n", cudaGetErrorString(error)); // // Pre-process the pattern to be matched // dim3 threadsPerBlocks(ASIZE, 1); int t = m / threadsPerBlocks.x; int t1 = m % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks(t, 1); printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x); cudaEventRecord(start_event, 0); processPattern<<<numBlocks,threadsPerBlocks>>>(d_substr, m, d_shifts); cudaThreadSynchronize(); cudaEventRecord(stop_event, 0); cudaEventSynchronize( stop_event ); cudaEventElapsedTime( &time, start_event, stop_event ); cudaMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, cudaMemcpyDeviceToHost ) ; //error = cudaGetLastError(); //printf("Error8: %s\n", cudaGetErrorString(error)); // // Transfer the pre-computed shift indexes from host to device memory // cudaMemcpy(l_shifts, d_shifts, ASIZE * sizeof(int), cudaMemcpyDeviceToHost) ; precomputeShiftIndx(mainString , n, m, l_shifts, l_indx); cudaMemcpy(d_shifts, l_shifts, ASIZE * sizeof(int), cudaMemcpyHostToDevice) ; cudaMemcpy(d_indx, l_indx, n * sizeof(int), cudaMemcpyHostToDevice) ; //error = cudaGetLastError(); //printf("Error9: %s\n", cudaGetErrorString(error)); // // Perform the actual search // t = n / threadsPerBlocks.x; t1 = n % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks2(t, 1); printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x); cudaEventRecord(start_event, 0); search<<<numBlocks2,threadsPerBlocks>>>(d_substr, m, d_text, n, d_shifts, d_indx,d_results); cudaThreadSynchronize(); cudaEventRecord(stop_event, 
0); cudaEventSynchronize( stop_event ); cudaEventElapsedTime( &time2, start_event, stop_event ); cudaEventDestroy( start_event ); // cleanup cudaEventDestroy( stop_event ); // cleanup printf("done and it took: %f+%f=%f milliseconds\n",time, time2, time+time2); //cudaGetSymbolAddress((void**)&d_results, "results"); //cudaMalloc((void**)&d_results, n * sizeof(int)) ; //int* l_results = (int*) malloc( n * sizeof(int) ); cudaMemcpy(l_results, d_results, n * sizeof(int), cudaMemcpyDeviceToHost) ; display_results(n, l_results); //error = cudaGetLastError(); //printf("Error10: %s\n", cudaGetErrorString(error)); cudaFree(d_substr); cudaFree(d_shifts); cudaFree(d_indx); cudaFree(d_text); free(mainString); free(subString); free(l_indx); free(l_shifts); free(l_results); cudaThreadExit(); return 0; }
78c961d6fbb375a5302c08ab2723c65af37d13e9.hip
// !!! This is a file automatically generated by hipify!!! #ifndef gpu_gpuUtils_cu #define gpu_gpuUtils_cu #include "Utils.h" __device__ void calculateChannelOffsets(int offset, int blockIndex, int blockDimension,int threadIndex, int* rOffset, int* gOffset, int* bOffset) { *rOffset = blockIndex * blockDimension + threadIndex; *gOffset = *rOffset + 1*offset; *bOffset = *rOffset + 2*offset; } void sendWarmUpSignal(int* h_data, const unsigned int sizeData) { int* d_data; hipMalloc((void**)&d_data,sizeData); hipMemcpy(d_data,h_data,sizeData,hipMemcpyHostToDevice); hipFree(d_data); } void startSetup(int width, int height, int channels,int* problemSize, int* sizeData, int* sizeResult,gpu::Setup* setup) { *problemSize = width*height*channels; *sizeData = *problemSize * sizeof(unsigned char); *sizeResult = *sizeData; gpu::getSetupConfig(width*height,setup); } #endif //gpu_gpuUtils_cu
78c961d6fbb375a5302c08ab2723c65af37d13e9.cu
#ifndef gpu_gpuUtils_cu #define gpu_gpuUtils_cu #include "Utils.h" __device__ void calculateChannelOffsets(int offset, int blockIndex, int blockDimension,int threadIndex, int* rOffset, int* gOffset, int* bOffset) { *rOffset = blockIndex * blockDimension + threadIndex; *gOffset = *rOffset + 1*offset; *bOffset = *rOffset + 2*offset; } void sendWarmUpSignal(int* h_data, const unsigned int sizeData) { int* d_data; cudaMalloc((void**)&d_data,sizeData); cudaMemcpy(d_data,h_data,sizeData,cudaMemcpyHostToDevice); cudaFree(d_data); } void startSetup(int width, int height, int channels,int* problemSize, int* sizeData, int* sizeResult,gpu::Setup* setup) { *problemSize = width*height*channels; *sizeData = *problemSize * sizeof(unsigned char); *sizeResult = *sizeData; gpu::getSetupConfig(width*height,setup); } #endif //gpu_gpuUtils_cu
54470a0d9c7e24d6199ccfb698b95423d9617202.hip
// !!! This is a file automatically generated by hipify!!! /* * gpu_util.cu -- GPU utility functions * * Copyright (C) 2010-2013, Computing Systems Laboratory (CSLab) * Copyright (C) 2010-2013, Vasileios Karakasis */ #include <hip/hip_runtime.h> #include "gpu_util.h" /* Initialize the CUDA runtime */ void gpu_init() { hipFree(0); } void *gpu_alloc(size_t count) { void *ret; if (hipMalloc(&ret, count) != hipSuccess) { ret = NULL; } return ret; } void gpu_free(void *gpuptr) { hipFree(gpuptr); } int copy_to_gpu(const void *host, void *gpu, size_t count) { if (hipMemcpy(gpu, host, count, hipMemcpyHostToDevice) != hipSuccess) return -1; return 0; } int copy_from_gpu(void *host, const void *gpu, size_t count) { if (hipMemcpy(host, gpu, count, hipMemcpyDeviceToHost) != hipSuccess) return -1; return 0; } const char *gpu_get_errmsg(hipError_t err) { return hipGetErrorString(err); } const char *gpu_get_last_errmsg() { return gpu_get_errmsg(hipGetLastError()); }
54470a0d9c7e24d6199ccfb698b95423d9617202.cu
/* * gpu_util.cu -- GPU utility functions * * Copyright (C) 2010-2013, Computing Systems Laboratory (CSLab) * Copyright (C) 2010-2013, Vasileios Karakasis */ #include <cuda.h> #include "gpu_util.h" /* Initialize the CUDA runtime */ void gpu_init() { cudaFree(0); } void *gpu_alloc(size_t count) { void *ret; if (cudaMalloc(&ret, count) != cudaSuccess) { ret = NULL; } return ret; } void gpu_free(void *gpuptr) { cudaFree(gpuptr); } int copy_to_gpu(const void *host, void *gpu, size_t count) { if (cudaMemcpy(gpu, host, count, cudaMemcpyHostToDevice) != cudaSuccess) return -1; return 0; } int copy_from_gpu(void *host, const void *gpu, size_t count) { if (cudaMemcpy(host, gpu, count, cudaMemcpyDeviceToHost) != cudaSuccess) return -1; return 0; } const char *gpu_get_errmsg(cudaError_t err) { return cudaGetErrorString(err); } const char *gpu_get_last_errmsg() { return gpu_get_errmsg(cudaGetLastError()); }
fa8eaa7de94723417e1479ff243588488479e8e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <layers/reduce_sum_layer.hpp> #include <utils.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <size_t length, typename T> __device__ int array_length(T (&arr)[length]) { return length; } // this kernel can support dims_size=1/2/3 template <typename T, typename... Args> __global__ void reduce_sum_kernel(const T* input, T* output, int axis, Args... 
args) { size_t in_dims[] = {args...}; int dims_size = array_length(in_dims); T local_sum = 0.0f; if (axis == 0) { // block_num = dim1 * dim2, do dim0 number of elements reduction in one block if (dims_size == 1) { // dims_size == 1 for (int tid = threadIdx.x; tid < in_dims[0]; tid += blockDim.x) { local_sum += input[tid]; } } else if (dims_size == 2) { // dims_size == 2 for (int tid = threadIdx.x; tid < in_dims[0]; tid += blockDim.x) { local_sum += input[tid * in_dims[1] + blockIdx.x]; } } else if (dims_size == 3) { // dims_size == 3 for (int tid = threadIdx.x; tid < in_dims[0]; tid += blockDim.x) { local_sum += input[tid * (in_dims[1] * in_dims[2]) + blockIdx.x]; } } } else if (axis == 1) { // block_num = dim0 * dim2, do dim1 number of elements reduction in one block if (dims_size == 2) { // dims_size == 2 for (int tid = threadIdx.x; tid < in_dims[1]; tid += blockDim.x) { local_sum += input[blockIdx.x * in_dims[1] + tid]; } } else if (dims_size == 3) { // dims_size == 3 for (int tid = threadIdx.x; tid < in_dims[1]; tid += blockDim.x) { local_sum += input[blockIdx.x / in_dims[2] * (in_dims[1] * in_dims[2]) + tid * in_dims[2] + blockIdx.x % in_dims[2]]; } } } else if (axis == 2) { // block_num = dim0 * dim1, do dim2 number of elements reduction in one block for (int tid = threadIdx.x; tid < in_dims[2]; tid += blockDim.x) { local_sum += input[blockIdx.x * in_dims[2] + tid]; } } local_sum = blockReduceSum(local_sum); if (threadIdx.x == 0) { output[blockIdx.x] = local_sum; } } template <typename T, typename... Args> __global__ void reduce_sum_dgrad_kernel(const T* top_grad, T* dgrad, int axis, Args... 
args) { int tid = blockIdx.x * blockDim.x + threadIdx.x; size_t in_dims[] = {args...}; int dims_size = array_length(in_dims); if (axis == 0) { if (dims_size == 1) { // dims_size == 1 if (tid < in_dims[0]) { dgrad[tid] = top_grad[0]; } } else if (dims_size == 2) { // dims_size == 2 if (tid < (in_dims[0] * in_dims[1])) { dgrad[tid] = top_grad[tid % in_dims[1]]; } } else if (dims_size == 3) { // dims_size == 3 if (tid < (in_dims[0] * in_dims[1] * in_dims[2])) { int dim1_index = tid % (in_dims[1] * in_dims[2]) / in_dims[2]; int dim2_index = tid % in_dims[2]; dgrad[tid] = top_grad[dim1_index * in_dims[2] + dim2_index]; } } } else if (axis == 1) { if (dims_size == 2) { // dims_size == 2 if (tid < (in_dims[0] * in_dims[1])) { dgrad[tid] = top_grad[tid / in_dims[1]]; } } else if (dims_size == 3) { // dims_size == 3 if (tid < (in_dims[0] * in_dims[1] * in_dims[2])) { int dim0_index = tid / (in_dims[1] * in_dims[2]); int dim2_index = tid % in_dims[2]; dgrad[tid] = top_grad[dim0_index * in_dims[2] + dim2_index]; } } } else if (axis == 2) { int dim0_index = tid / (in_dims[1] * in_dims[2]); int dim1_index = tid % (in_dims[1] * in_dims[2]) / in_dims[2]; dgrad[tid] = top_grad[dim0_index * in_dims[1] + dim1_index]; } } } // end of namespace template <typename T> ReduceSumLayer<T>::ReduceSumLayer(const Tensor2<T>& in_tensor, Tensor2<T>& out_tensor, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, int axis, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource), axis_(axis) { try { // error input checking const auto& in_dims = in_tensor.get_dimensions(); for (auto i : in_dims) { if (i == 0) { CK_THROW_(Error_t::WrongInput, "The input dims can not be 0"); } } if (axis >= (int)(in_dims.size()) || axis < 0) { CK_THROW_(Error_t::WrongInput, "The axis is overflow"); } std::vector<size_t> out_dims(in_dims.size()); for (int i = 0; i < (int)(in_dims.size()); i++) { if (i == axis) { out_dims[i] = 1; } else { out_dims[i] = in_dims[i]; } } 
blobs_buff->reserve(out_dims, &out_tensor); out_tensors_.push_back(out_tensor); in_tensors_.push_back(in_tensor); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void ReduceSumLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); T* input = in_tensors_[0].get_ptr(); T* output = out_tensors_[0].get_ptr(); auto in_dims = in_tensors_[0].get_dimensions(); auto out_dims = out_tensors_[0].get_dimensions(); int block_num = 1; for (auto dim : out_dims) { block_num *= dim; } dim3 blockSize(256, 1, 1); dim3 gridSize(block_num, 1, 1); if (in_dims.size() == 1) { hipLaunchKernelGGL(( reduce_sum_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), input, output, axis_, in_dims[0]); } else if (in_dims.size() == 2) { hipLaunchKernelGGL(( reduce_sum_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), input, output, axis_, in_dims[0], in_dims[1]); } else if (in_dims.size() == 3) { hipLaunchKernelGGL(( reduce_sum_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), input, output, axis_, in_dims[0], in_dims[1], in_dims[2]); } #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template <typename T> void ReduceSumLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); T* input = in_tensors_[0].get_ptr(); T* output = out_tensors_[0].get_ptr(); auto in_dims = in_tensors_[0].get_dimensions(); int size = 1; for (auto dim : in_dims) { size *= dim; } dim3 blockSize(256, 1, 1); dim3 gridSize((size + blockSize.x - 1) / blockSize.x, 1, 1); if (in_dims.size() == 1) { hipLaunchKernelGGL(( reduce_sum_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), output, input, axis_, in_dims[0]); } else if (in_dims.size() == 2) { hipLaunchKernelGGL(( reduce_sum_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), output, input, axis_, in_dims[0], in_dims[1]); } else if (in_dims.size() 
== 3) { hipLaunchKernelGGL(( reduce_sum_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), output, input, axis_, in_dims[0], in_dims[1], in_dims[2]); } #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template class ReduceSumLayer<float>; template class ReduceSumLayer<__half>; } // namespace HugeCTR
fa8eaa7de94723417e1479ff243588488479e8e4.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <layers/reduce_sum_layer.hpp> #include <utils.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <size_t length, typename T> __device__ int array_length(T (&arr)[length]) { return length; } // this kernel can support dims_size=1/2/3 template <typename T, typename... Args> __global__ void reduce_sum_kernel(const T* input, T* output, int axis, Args... 
args) { size_t in_dims[] = {args...}; int dims_size = array_length(in_dims); T local_sum = 0.0f; if (axis == 0) { // block_num = dim1 * dim2, do dim0 number of elements reduction in one block if (dims_size == 1) { // dims_size == 1 for (int tid = threadIdx.x; tid < in_dims[0]; tid += blockDim.x) { local_sum += input[tid]; } } else if (dims_size == 2) { // dims_size == 2 for (int tid = threadIdx.x; tid < in_dims[0]; tid += blockDim.x) { local_sum += input[tid * in_dims[1] + blockIdx.x]; } } else if (dims_size == 3) { // dims_size == 3 for (int tid = threadIdx.x; tid < in_dims[0]; tid += blockDim.x) { local_sum += input[tid * (in_dims[1] * in_dims[2]) + blockIdx.x]; } } } else if (axis == 1) { // block_num = dim0 * dim2, do dim1 number of elements reduction in one block if (dims_size == 2) { // dims_size == 2 for (int tid = threadIdx.x; tid < in_dims[1]; tid += blockDim.x) { local_sum += input[blockIdx.x * in_dims[1] + tid]; } } else if (dims_size == 3) { // dims_size == 3 for (int tid = threadIdx.x; tid < in_dims[1]; tid += blockDim.x) { local_sum += input[blockIdx.x / in_dims[2] * (in_dims[1] * in_dims[2]) + tid * in_dims[2] + blockIdx.x % in_dims[2]]; } } } else if (axis == 2) { // block_num = dim0 * dim1, do dim2 number of elements reduction in one block for (int tid = threadIdx.x; tid < in_dims[2]; tid += blockDim.x) { local_sum += input[blockIdx.x * in_dims[2] + tid]; } } local_sum = blockReduceSum(local_sum); if (threadIdx.x == 0) { output[blockIdx.x] = local_sum; } } template <typename T, typename... Args> __global__ void reduce_sum_dgrad_kernel(const T* top_grad, T* dgrad, int axis, Args... 
args) { int tid = blockIdx.x * blockDim.x + threadIdx.x; size_t in_dims[] = {args...}; int dims_size = array_length(in_dims); if (axis == 0) { if (dims_size == 1) { // dims_size == 1 if (tid < in_dims[0]) { dgrad[tid] = top_grad[0]; } } else if (dims_size == 2) { // dims_size == 2 if (tid < (in_dims[0] * in_dims[1])) { dgrad[tid] = top_grad[tid % in_dims[1]]; } } else if (dims_size == 3) { // dims_size == 3 if (tid < (in_dims[0] * in_dims[1] * in_dims[2])) { int dim1_index = tid % (in_dims[1] * in_dims[2]) / in_dims[2]; int dim2_index = tid % in_dims[2]; dgrad[tid] = top_grad[dim1_index * in_dims[2] + dim2_index]; } } } else if (axis == 1) { if (dims_size == 2) { // dims_size == 2 if (tid < (in_dims[0] * in_dims[1])) { dgrad[tid] = top_grad[tid / in_dims[1]]; } } else if (dims_size == 3) { // dims_size == 3 if (tid < (in_dims[0] * in_dims[1] * in_dims[2])) { int dim0_index = tid / (in_dims[1] * in_dims[2]); int dim2_index = tid % in_dims[2]; dgrad[tid] = top_grad[dim0_index * in_dims[2] + dim2_index]; } } } else if (axis == 2) { int dim0_index = tid / (in_dims[1] * in_dims[2]); int dim1_index = tid % (in_dims[1] * in_dims[2]) / in_dims[2]; dgrad[tid] = top_grad[dim0_index * in_dims[1] + dim1_index]; } } } // end of namespace template <typename T> ReduceSumLayer<T>::ReduceSumLayer(const Tensor2<T>& in_tensor, Tensor2<T>& out_tensor, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, int axis, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource), axis_(axis) { try { // error input checking const auto& in_dims = in_tensor.get_dimensions(); for (auto i : in_dims) { if (i == 0) { CK_THROW_(Error_t::WrongInput, "The input dims can not be 0"); } } if (axis >= (int)(in_dims.size()) || axis < 0) { CK_THROW_(Error_t::WrongInput, "The axis is overflow"); } std::vector<size_t> out_dims(in_dims.size()); for (int i = 0; i < (int)(in_dims.size()); i++) { if (i == axis) { out_dims[i] = 1; } else { out_dims[i] = in_dims[i]; } } 
blobs_buff->reserve(out_dims, &out_tensor); out_tensors_.push_back(out_tensor); in_tensors_.push_back(in_tensor); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void ReduceSumLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); T* input = in_tensors_[0].get_ptr(); T* output = out_tensors_[0].get_ptr(); auto in_dims = in_tensors_[0].get_dimensions(); auto out_dims = out_tensors_[0].get_dimensions(); int block_num = 1; for (auto dim : out_dims) { block_num *= dim; } dim3 blockSize(256, 1, 1); dim3 gridSize(block_num, 1, 1); if (in_dims.size() == 1) { reduce_sum_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(input, output, axis_, in_dims[0]); } else if (in_dims.size() == 2) { reduce_sum_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(input, output, axis_, in_dims[0], in_dims[1]); } else if (in_dims.size() == 3) { reduce_sum_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>( input, output, axis_, in_dims[0], in_dims[1], in_dims[2]); } #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template <typename T> void ReduceSumLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); T* input = in_tensors_[0].get_ptr(); T* output = out_tensors_[0].get_ptr(); auto in_dims = in_tensors_[0].get_dimensions(); int size = 1; for (auto dim : in_dims) { size *= dim; } dim3 blockSize(256, 1, 1); dim3 gridSize((size + blockSize.x - 1) / blockSize.x, 1, 1); if (in_dims.size() == 1) { reduce_sum_dgrad_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(output, input, axis_, in_dims[0]); } else if (in_dims.size() == 2) { reduce_sum_dgrad_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>( output, input, axis_, in_dims[0], in_dims[1]); } else if (in_dims.size() == 3) { reduce_sum_dgrad_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>( output, input, axis_, in_dims[0], in_dims[1], in_dims[2]); } 
#ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template class ReduceSumLayer<float>; template class ReduceSumLayer<__half>; } // namespace HugeCTR
5d74bf0bd0e6eef4ead4dcf17984ee76dbe83619.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <math.h> #include <ctime> #include <cstdlib> #include <chrono> #include <hip/hip_runtime.h> using namespace std; struct stats { double mean; double min; double max; double stddev; }; // CPU function to find mean of an array double cpu_get_mean(int n, double *x) { double sum = 0; for (int i = 0; i < n; i++) { sum += x[i]; } return sum/n; } // use CPU to calculate std deviation (Welford's algorithm) double cpu_get_stddev(int n, double *x){ double mean = x[0]; double m2 = 0; double delta; double delta2; for (int i = 1; i < n; i++){ delta = x[i] - mean; mean += delta/(i+1); delta2 = x[i] - mean; m2 += delta * delta2; } return sqrt(m2/n); } // CPU function to find max element of an array double cpu_get_max(int n, double *x) { double max = x[0]; for (int i = 1; i < n; i++) { max = (max < x[i]) ? x[i] : max; } return max; } // CPU function to find min element of an array double cpu_get_min(int n, double *x) { double min = x[0]; for (int i = 1; i < n; i++) { min = (x[i] < min) ? x[i] : min; } return min; } // use CPU to calculate min, mean, max, std deviation (Welford's algorithm) stats cpu_get_all(int n, double *x){ stats myStats; double mean = x[0]; double min = x[0]; double max = x[0]; double m2 = 0; double delta; double delta2; for (int i = 1; i < n; i++){ max = (max < x[i]) ? x[i] : max; min = (x[i] < min) ? x[i] : min; delta = x[i] - mean; mean += delta/(i+1); delta2 = x[i] - mean; m2 += delta * delta2; } myStats.mean = mean; myStats.min = min; myStats.max = max; myStats.stddev = sqrt(m2/n); return myStats; } // Kernel function to find the maximum element of an array __global__ void get_gpu_max(int n, double *x, double *results) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double max = x[index]; for (int i = index + stride; i < n; i += stride) { max = (max < x[i]) ? 
x[i] : max; } results[index] = max; } // Kernel function to find the minimum element of an array __global__ void get_gpu_min(int n, double *x, double *results) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double min = x[index]; for (int i = index + stride; i < n; i += stride) { min = (x[i] < min) ? x[i] : min; } results[index] = min; } // kernel to calculate the mean on the GPU __global__ void get_gpu_mean(int n, double *x, double *results) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double mean = x[index]; int count = 1; for (int i = index + stride; i < n; i += stride){ count++; mean += (x[i] - mean)/count; } results[index] = mean; } // Calculate std deviation on the GPU __global__ void get_gpu_stddev(int n, double *x, double *results){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double mean = x[index]; double m2 = 0; double delta; double delta2; int count = 1; for (int i = index + stride; i < n; i += stride){ count++; delta = x[i] - mean; mean += delta/count; delta2 = x[i] - mean; m2 += delta * delta2; } results[index] = m2; } // caluclate all stats on the GPU __global__ void get_gpu_all(int n, double *x, stats *all_results){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double mean = x[index]; double min = x[index]; double max = x[index]; double m2 = 0; double delta; double delta2; int count = 1; for (int i = index + stride; i < n; i += stride){ max = (max < x[i]) ? x[i] : max; min = (x[i] < min) ? 
x[i] : min; count++; delta = x[i] - mean; mean += delta/count; delta2 = x[i] - mean; m2 += delta * delta2; } all_results[index].mean = mean; all_results[index].min = min; all_results[index].max = max; all_results[index].stddev = m2; // m2 not actually std dev } void print_diff(double x, double y){ cout << "Difference: " << 100*(y - x)/x << "%\n"; } void run_tests(int N_pre, int N_BLOCKS, int THREADS_PER_BLK) { // We need N to be a multiple of N_THREADS int N = N_BLOCKS * THREADS_PER_BLK * floor(N_pre / (THREADS_PER_BLK * N_BLOCKS)); /** cout << "N = " << N << endl; cout << "N_BLOCKS = " << N_BLOCKS << endl; cout << "THREADS_PER_BLK = " << THREADS_PER_BLK << endl; cout << "Allocating memory and initializing..."; **/ double *x; hipMallocManaged(&x, N*sizeof(double)); srand(time(NULL)); for (int i = 0; i < N; i++) { x[i] = ((double) rand()) / ((double) RAND_MAX); } double *results; hipMallocManaged(&results, N_BLOCKS*THREADS_PER_BLK*sizeof(double)); // use CPU to calculate max auto start = std::chrono::high_resolution_clock::now(); double cpu_max = cpu_get_max(N, x); auto end = std::chrono::high_resolution_clock::now(); auto dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated max:" << fixed << cpu_max << "_____"; // fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,max,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_max); // use GPU to calculate max start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( get_gpu_max), dim3(N_BLOCKS), dim3(THREADS_PER_BLK), 0, 0, N, x, results); hipDeviceSynchronize(); double gpu_max = results[0]; for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_max = (gpu_max < results[i]) ? 
results[i] : gpu_max; } end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated max:" << fixed << gpu_max << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,max,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",gpu_max); //print_diff(cpu_max, gpu_max); //cout << endl; // use CPU to calculate min start = std::chrono::high_resolution_clock::now(); double cpu_min = cpu_get_min(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated min:" << fixed << cpu_min << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,min,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_min); // use GPU to calculate min start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( get_gpu_min), dim3(N_BLOCKS), dim3(THREADS_PER_BLK), 0, 0, N, x, results); hipDeviceSynchronize(); double gpu_min = results[0]; for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_min = (results[i] < gpu_min) ? 
results[i] : gpu_min; } end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated min:" << fixed << gpu_min << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,min,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",gpu_min); //print_diff(cpu_min, gpu_min); //cout << endl; // use CPU to calculate mean start = std::chrono::high_resolution_clock::now(); double cpu_mean = cpu_get_mean(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated mean:" << fixed << cpu_mean << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,avg,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_mean); // use GPU to calculate mean start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( get_gpu_mean), dim3(N_BLOCKS), dim3(THREADS_PER_BLK), 0, 0, N, x, results); hipDeviceSynchronize(); double gpu_mean_sum = 0; for (int i = 0; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_mean_sum += results[i]; } double gpu_mean = gpu_mean_sum/(N_BLOCKS*THREADS_PER_BLK); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated mean:" << fixed << gpu_mean << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,avg,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",gpu_mean); //print_diff(cpu_mean, gpu_mean); //cout << endl; // use CPU to calculate std dev start = std::chrono::high_resolution_clock::now(); double cpu_stddev = cpu_get_stddev(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated std dev:" 
<< fixed << cpu_stddev << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,dev,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_stddev); // use GPU to calculate std dev start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( get_gpu_stddev), dim3(N_BLOCKS), dim3(THREADS_PER_BLK), 0, 0, N, x, results); hipDeviceSynchronize(); double gpu_m2 = 0; for (int i = 0; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_m2 += results[i]; } double gpu_stddev = sqrt(gpu_m2/N); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated std dev:" << fixed << gpu_stddev << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,dev,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); /** print_diff(cpu_stddev, gpu_stddev); cout << endl; **/ //fprintf(stdout," ,%ld\n",gpu_stddev); // use CPU to calculate all stats start = std::chrono::high_resolution_clock::now(); stats my_stats = cpu_get_all(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); /** cout << "Concurrent: CPU calculated max:" << fixed << my_stats.max << endl; cout << "Concurrent: CPU calculated min:" << fixed << my_stats.min << endl; cout << "Concurrent: CPU calculated mean:" << fixed << my_stats.mean << endl; cout << "Concurrent: CPU calculated std dev:" << fixed << my_stats.stddev << endl; fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); **/ fprintf(stdout,"%d,%d,%d,all,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); hipFree(results); // use GPU to calculate all stats stats* all_results; hipMallocManaged(&all_results, N_BLOCKS*THREADS_PER_BLK*sizeof(stats)); // start the timer start = std::chrono::high_resolution_clock::now(); // run calculations on the GPU hipLaunchKernelGGL(( get_gpu_all), dim3(N_BLOCKS), 
dim3(THREADS_PER_BLK), 0, 0, N, x, all_results); // synchrnonize hipDeviceSynchronize(); // We now need to accumulate results from all threads double m2 = all_results[0].stddev; double mean = all_results[0].mean; double delta; double new_mean; int n_a = N / (N_BLOCKS*THREADS_PER_BLK); int n_b = n_a; double max = all_results[0].max; double min = all_results[0].min; for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) { new_mean = all_results[i].mean; delta = new_mean - mean; // we update our running mean value mean = (n_a*mean + n_b*new_mean)/(n_a + n_b); m2 += all_results[i].stddev + delta * delta * n_a * n_b / (n_a + n_b); n_a += n_b; min = (all_results[i].min < min) ? all_results[i].min : min; max = (all_results[i].max > max) ? all_results[i].max : max; } double stddev = sqrt(m2/N); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); /** cout << "Concurrent: GPU calculated max:" << fixed << max << endl; cout << "Concurrent: GPU calculated min:" << fixed << min << endl; cout << "Concurrent: GPU calculated mean:" << fixed << mean << endl; cout << "Concurrent: GPU calculated std dev:" << fixed << stddev << endl; fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); **/ fprintf(stdout,"%d,%d,%d,all,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); // Free memory hipFree(x); hipFree(all_results); } int main(void) { // We want to display floats with max precision cout.precision(17); int Ns[] = {50000000,100000000,150000000}; int TPBs[] = {256,512}; int NBs[] = {1,4}; for (int n : Ns) { for (int threads_per_block : TPBs) { for (int n_blocks : NBs) { run_tests(n, n_blocks, threads_per_block); } } } }
5d74bf0bd0e6eef4ead4dcf17984ee76dbe83619.cu
#include <iostream> #include <math.h> #include <ctime> #include <cstdlib> #include <chrono> #include <cuda_runtime.h> using namespace std; struct stats { double mean; double min; double max; double stddev; }; // CPU function to find mean of an array double cpu_get_mean(int n, double *x) { double sum = 0; for (int i = 0; i < n; i++) { sum += x[i]; } return sum/n; } // use CPU to calculate std deviation (Welford's algorithm) double cpu_get_stddev(int n, double *x){ double mean = x[0]; double m2 = 0; double delta; double delta2; for (int i = 1; i < n; i++){ delta = x[i] - mean; mean += delta/(i+1); delta2 = x[i] - mean; m2 += delta * delta2; } return sqrt(m2/n); } // CPU function to find max element of an array double cpu_get_max(int n, double *x) { double max = x[0]; for (int i = 1; i < n; i++) { max = (max < x[i]) ? x[i] : max; } return max; } // CPU function to find min element of an array double cpu_get_min(int n, double *x) { double min = x[0]; for (int i = 1; i < n; i++) { min = (x[i] < min) ? x[i] : min; } return min; } // use CPU to calculate min, mean, max, std deviation (Welford's algorithm) stats cpu_get_all(int n, double *x){ stats myStats; double mean = x[0]; double min = x[0]; double max = x[0]; double m2 = 0; double delta; double delta2; for (int i = 1; i < n; i++){ max = (max < x[i]) ? x[i] : max; min = (x[i] < min) ? x[i] : min; delta = x[i] - mean; mean += delta/(i+1); delta2 = x[i] - mean; m2 += delta * delta2; } myStats.mean = mean; myStats.min = min; myStats.max = max; myStats.stddev = sqrt(m2/n); return myStats; } // Kernel function to find the maximum element of an array __global__ void get_gpu_max(int n, double *x, double *results) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double max = x[index]; for (int i = index + stride; i < n; i += stride) { max = (max < x[i]) ? 
x[i] : max; } results[index] = max; } // Kernel function to find the minimum element of an array __global__ void get_gpu_min(int n, double *x, double *results) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double min = x[index]; for (int i = index + stride; i < n; i += stride) { min = (x[i] < min) ? x[i] : min; } results[index] = min; } // kernel to calculate the mean on the GPU __global__ void get_gpu_mean(int n, double *x, double *results) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double mean = x[index]; int count = 1; for (int i = index + stride; i < n; i += stride){ count++; mean += (x[i] - mean)/count; } results[index] = mean; } // Calculate std deviation on the GPU __global__ void get_gpu_stddev(int n, double *x, double *results){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double mean = x[index]; double m2 = 0; double delta; double delta2; int count = 1; for (int i = index + stride; i < n; i += stride){ count++; delta = x[i] - mean; mean += delta/count; delta2 = x[i] - mean; m2 += delta * delta2; } results[index] = m2; } // caluclate all stats on the GPU __global__ void get_gpu_all(int n, double *x, stats *all_results){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double mean = x[index]; double min = x[index]; double max = x[index]; double m2 = 0; double delta; double delta2; int count = 1; for (int i = index + stride; i < n; i += stride){ max = (max < x[i]) ? x[i] : max; min = (x[i] < min) ? 
x[i] : min; count++; delta = x[i] - mean; mean += delta/count; delta2 = x[i] - mean; m2 += delta * delta2; } all_results[index].mean = mean; all_results[index].min = min; all_results[index].max = max; all_results[index].stddev = m2; // m2 not actually std dev } void print_diff(double x, double y){ cout << "Difference: " << 100*(y - x)/x << "%\n"; } void run_tests(int N_pre, int N_BLOCKS, int THREADS_PER_BLK) { // We need N to be a multiple of N_THREADS int N = N_BLOCKS * THREADS_PER_BLK * floor(N_pre / (THREADS_PER_BLK * N_BLOCKS)); /** cout << "N = " << N << endl; cout << "N_BLOCKS = " << N_BLOCKS << endl; cout << "THREADS_PER_BLK = " << THREADS_PER_BLK << endl; cout << "Allocating memory and initializing..."; **/ double *x; cudaMallocManaged(&x, N*sizeof(double)); srand(time(NULL)); for (int i = 0; i < N; i++) { x[i] = ((double) rand()) / ((double) RAND_MAX); } double *results; cudaMallocManaged(&results, N_BLOCKS*THREADS_PER_BLK*sizeof(double)); // use CPU to calculate max auto start = std::chrono::high_resolution_clock::now(); double cpu_max = cpu_get_max(N, x); auto end = std::chrono::high_resolution_clock::now(); auto dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated max:" << fixed << cpu_max << "_____"; // fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,max,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_max); // use GPU to calculate max start = std::chrono::high_resolution_clock::now(); get_gpu_max<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results); cudaDeviceSynchronize(); double gpu_max = results[0]; for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_max = (gpu_max < results[i]) ? 
results[i] : gpu_max; } end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated max:" << fixed << gpu_max << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,max,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",gpu_max); //print_diff(cpu_max, gpu_max); //cout << endl; // use CPU to calculate min start = std::chrono::high_resolution_clock::now(); double cpu_min = cpu_get_min(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated min:" << fixed << cpu_min << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,min,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_min); // use GPU to calculate min start = std::chrono::high_resolution_clock::now(); get_gpu_min<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results); cudaDeviceSynchronize(); double gpu_min = results[0]; for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_min = (results[i] < gpu_min) ? 
results[i] : gpu_min; } end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated min:" << fixed << gpu_min << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,min,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",gpu_min); //print_diff(cpu_min, gpu_min); //cout << endl; // use CPU to calculate mean start = std::chrono::high_resolution_clock::now(); double cpu_mean = cpu_get_mean(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated mean:" << fixed << cpu_mean << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,avg,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_mean); // use GPU to calculate mean start = std::chrono::high_resolution_clock::now(); get_gpu_mean<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results); cudaDeviceSynchronize(); double gpu_mean_sum = 0; for (int i = 0; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_mean_sum += results[i]; } double gpu_mean = gpu_mean_sum/(N_BLOCKS*THREADS_PER_BLK); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated mean:" << fixed << gpu_mean << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,avg,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",gpu_mean); //print_diff(cpu_mean, gpu_mean); //cout << endl; // use CPU to calculate std dev start = std::chrono::high_resolution_clock::now(); double cpu_stddev = cpu_get_stddev(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "CPU calculated std dev:" << fixed << cpu_stddev << "_____"; 
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,dev,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); //fprintf(stdout," ,%ld\n",cpu_stddev); // use GPU to calculate std dev start = std::chrono::high_resolution_clock::now(); get_gpu_stddev<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results); cudaDeviceSynchronize(); double gpu_m2 = 0; for (int i = 0; i < N_BLOCKS*THREADS_PER_BLK; i++) { gpu_m2 += results[i]; } double gpu_stddev = sqrt(gpu_m2/N); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); cout << "GPU calculated std dev:" << fixed << gpu_stddev << "_____"; //fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); fprintf(stdout,"%d,%d,%d,dev,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); /** print_diff(cpu_stddev, gpu_stddev); cout << endl; **/ //fprintf(stdout," ,%ld\n",gpu_stddev); // use CPU to calculate all stats start = std::chrono::high_resolution_clock::now(); stats my_stats = cpu_get_all(N, x); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); /** cout << "Concurrent: CPU calculated max:" << fixed << my_stats.max << endl; cout << "Concurrent: CPU calculated min:" << fixed << my_stats.min << endl; cout << "Concurrent: CPU calculated mean:" << fixed << my_stats.mean << endl; cout << "Concurrent: CPU calculated std dev:" << fixed << my_stats.stddev << endl; fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); **/ fprintf(stdout,"%d,%d,%d,all,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); cudaFree(results); // use GPU to calculate all stats stats* all_results; cudaMallocManaged(&all_results, N_BLOCKS*THREADS_PER_BLK*sizeof(stats)); // start the timer start = std::chrono::high_resolution_clock::now(); // run calculations on the GPU get_gpu_all<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, all_results); // synchrnonize cudaDeviceSynchronize(); // We now need 
to accumulate results from all threads double m2 = all_results[0].stddev; double mean = all_results[0].mean; double delta; double new_mean; int n_a = N / (N_BLOCKS*THREADS_PER_BLK); int n_b = n_a; double max = all_results[0].max; double min = all_results[0].min; for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) { new_mean = all_results[i].mean; delta = new_mean - mean; // we update our running mean value mean = (n_a*mean + n_b*new_mean)/(n_a + n_b); m2 += all_results[i].stddev + delta * delta * n_a * n_b / (n_a + n_b); n_a += n_b; min = (all_results[i].min < min) ? all_results[i].min : min; max = (all_results[i].max > max) ? all_results[i].max : max; } double stddev = sqrt(m2/N); end = std::chrono::high_resolution_clock::now(); dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start); /** cout << "Concurrent: GPU calculated max:" << fixed << max << endl; cout << "Concurrent: GPU calculated min:" << fixed << min << endl; cout << "Concurrent: GPU calculated mean:" << fixed << mean << endl; cout << "Concurrent: GPU calculated std dev:" << fixed << stddev << endl; fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count()); **/ fprintf(stdout,"%d,%d,%d,all,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count()); // Free memory cudaFree(x); cudaFree(all_results); } int main(void) { // We want to display floats with max precision cout.precision(17); int Ns[] = {50000000,100000000,150000000}; int TPBs[] = {256,512}; int NBs[] = {1,4}; for (int n : Ns) { for (int threads_per_block : TPBs) { for (int n_blocks : NBs) { run_tests(n, n_blocks, threads_per_block); } } } }
9e93d8b9ca6a0ba1c9870017e8e93d06615f2b54.hip
// !!! This is a file automatically generated by hipify!!!
/* CUDA Implementation for knn query*/
// HIP translation of the brute-force k-nearest-neighbour kernels (Garcia-style
// kNN: full distance matrix + per-column insertion sort). The .cu twin of this
// file appears later in this dataset; code tokens are kept in lockstep with it.
#ifndef _KNN_QUERY_KERNEL
#define _KNN_QUERY_KERNEL

#include <cmath>
#include <vector>
#include <cstdio>

#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <THH/THH.h>   // hipify rename of <THC/THC.h>; provides THCudaCheck

// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4.
#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Constants used by the program
#define BLOCK_DIM 16   // tile edge for the distance kernel (16x16 thread blocks)
#define DEBUG 0

/**
 * Computes the squared Euclidean distance between two matrices A (reference
 * points) and B (query points) containing respectively wA and wB points.
 * Tiled shared-memory algorithm: each 16x16 block accumulates partial sums of
 * squared differences over the `dim` axis. Launch with BLOCK_DIM x BLOCK_DIM
 * threads and a ceil(wB/16) x ceil(wA/16) grid.
 *
 * @param A    pointer on the matrix A (dim rows x wA cols, row-major)
 * @param wA   width of the matrix A = number of points in A
 * @param B    pointer on the matrix B (dim rows x wB cols, row-major)
 * @param wB   width of the matrix B = number of points in B
 * @param dim  dimension of points = height of matrices A and B
 * @param AB   pointer on the wA x wB output matrix of squared distances
 */
template <typename scalar_t>
__global__ void cuComputeDistanceGlobal( scalar_t* A, int wA,
    scalar_t* B, int wB, int dim, scalar_t* AB){

  // Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
  __shared__ scalar_t shared_A[BLOCK_DIM][BLOCK_DIM];
  __shared__ scalar_t shared_B[BLOCK_DIM][BLOCK_DIM];

  // Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
  // NOTE(review): every thread writes the same value into these __shared__
  // scalars below, so the unsynchronized writes are benign by uniformity.
  __shared__ int begin_A;
  __shared__ int begin_B;
  __shared__ int step_A;
  __shared__ int step_B;
  __shared__ int end_A;

  // Thread index
  int tx = threadIdx.x;
  int ty = threadIdx.y;

  // Other variables
  scalar_t tmp;
  scalar_t ssd = 0;   // running sum of squared differences for this (A,B) pair

  // Loop parameters
  begin_A = BLOCK_DIM * blockIdx.y;
  begin_B = BLOCK_DIM * blockIdx.x;
  step_A  = BLOCK_DIM * wA;
  step_B  = BLOCK_DIM * wB;
  end_A   = begin_A + (dim-1) * wA;

  // Conditions
  int cond0 = (begin_A + tx < wA); // used to write in shared memory
  int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
  int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix

  // Loop over all the sub-matrices of A and B required to compute the block sub-matrix
  for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
    // Load the matrices from device memory to shared memory; each thread loads one element of each matrix
    // Out-of-range tiles are zero-filled so they contribute nothing to ssd.
    if (a/wA + ty < dim){
      shared_A[ty][tx] = (cond0)? A[a + wA * ty + tx] : 0;
      shared_B[ty][tx] = (cond1)? B[b + wB * ty + tx] : 0;
    }
    else{
      shared_A[ty][tx] = 0;
      shared_B[ty][tx] = 0;
    }

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
    if (cond2 && cond1){
      for (int k = 0; k < BLOCK_DIM; ++k){
        tmp = shared_A[k][ty] - shared_B[k][tx];
        ssd += tmp*tmp;
      }
    }

    // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Write the block sub-matrix to device memory; each thread writes one element
  if (cond2 && cond1)
    AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}

/**
 * Gathers the k smallest distances for each column of the distance matrix
 * into the top k rows, by partial insertion sort. One thread handles one
 * column (one query point).
 *
 * Indices written into `ind` are 1-based (l + 1); the host-side knn() below
 * subtracts 1 before returning them.
 *
 * @param dist    distance matrix (height x width, column-per-query)
 * @param ind     index matrix, same layout as dist
 * @param width   width of the distance matrix and of the index matrix
 * @param height  height of the distance matrix and of the index matrix
 * @param k       number of neighbors to consider
 */
template <typename scalar_t>
__global__ void cuInsertionSort( scalar_t *dist, long *ind, int width, int height, int k){

  // Variables
  int l, i, j;
  scalar_t *p_dist;
  long *p_ind;
  scalar_t curr_dist, max_dist;
  long curr_row, max_row;
  unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;

  if (xIndex<width){
    // Pointer shift, initialization, and max value
    p_dist = dist + xIndex;
    p_ind = ind + xIndex;
    max_dist = p_dist[0];
    p_ind[0] = 1;   // 1-based index of row 0

    // Part 1: insertion-sort the first k elements of the column
    for (l=1; l<k; l++){
      curr_row = l * width;
      curr_dist = p_dist[curr_row];
      if (curr_dist<max_dist){
        i=l-1;
        // find insertion position i among the already-sorted prefix
        for (int a=0; a<l-1; a++){
          if (p_dist[a*width]>curr_dist){
            i=a;
            break;
          }
        }
        // shift [i, l) down one row, then insert
        for (j=l; j>i; j--){
          p_dist[j*width] = p_dist[(j-1)*width];
          p_ind[j*width] = p_ind[(j-1)*width];
        }
        p_dist[i*width] = curr_dist;
        p_ind[i*width] = l + 1;
      } else {
        p_ind[l*width] = l + 1;
      }
      max_dist = p_dist[curr_row];
    }

    // Part 2: insert each remaining element into the k first lines if it
    // beats the current k-th smallest (max_dist)
    max_row = (k-1)*width;
    for (l=k; l<height; l++){
      curr_dist = p_dist[l*width];
      if (curr_dist<max_dist){
        i=k-1;
        for (int a=0; a<k-1; a++){
          if (p_dist[a*width]>curr_dist){
            i=a;
            break;
          }
        }
        for (j=k-1; j>i; j--){
          p_dist[j*width] = p_dist[(j-1)*width];
          p_ind[j*width] = p_ind[(j-1)*width];
        }
        p_dist[i*width] = curr_dist;
        p_ind[i*width] = l + 1;
        max_dist = p_dist[max_row];
      }
    }
  }
}

/**
 * Computes the square root of the first k lines (width elements each)
 * of the distance matrix.
 *
 * NOTE(review): this kernel is never launched by knn() below (its grid
 * g_k_16x16 is computed but unused), so knn() returns squared distances —
 * confirm whether that is intentional.
 *
 * @param dist    distance matrix
 * @param width   width of the distance matrix
 * @param k       number of neighbors to consider
 */
template <typename scalar_t>
__global__ void cuParallelSqrt(scalar_t *dist, int width, int k){
  unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
  if (xIndex<width && yIndex<k)
    dist[yIndex*width + xIndex] = sqrt(dist[yIndex*width + xIndex]);
}

//-----------------------------------------------------------------------------------------------//
//                                   K-th NEAREST NEIGHBORS                                      //
//-----------------------------------------------------------------------------------------------//

/**
 * K nearest neighbor query: for every query point, find the k nearest
 * reference (key) points under (squared) Euclidean distance.
 *
 * Expects `query` and `key` as CUDA tensors of shape (batch, dim, n) — points
 * stored column-wise. For each batch element it computes the full
 * num_key x num_query distance matrix, then partially sorts each column.
 *
 * @param query  query points, shape (batch, dim, num_query); must be contiguous CUDA
 * @param key    reference points, shape (batch, dim, num_key); must be contiguous CUDA
 * @param k      number of neighbors to consider
 * @return {indices (batch, num_query, k), 0-based longs;
 *          distances (batch, num_query, k) — squared, see cuParallelSqrt note}
 *
 * NOTE(review): uses pre-1.0 ATen APIs (tensor.type() dispatch, data<T>());
 * CHECK_EQ is presumed to come from a project/glog header — confirm.
 */
std::vector<at::Tensor> knn(
  at::Tensor & query,
  at::Tensor & key,
  const int k
){
  CHECK_CONTIGUOUS(key); CHECK_CUDA(key);
  CHECK_CONTIGUOUS(query); CHECK_CUDA(query);
  long batch = key.size(0);
  long dim = key.size(1);
  CHECK_EQ(batch, query.size(0));
  CHECK_EQ(dim, query.size(1));
  long num_key = key.size(2);
  long num_query = query.size(2);

  at::Tensor dist = at::empty({batch, num_query, k}, query.options());
  at::Tensor ind = at::empty({batch, k, num_query}, query.options().dtype(at::kLong));

  // Per-batch loop: kernels are launched once per batch element.
  for (int elt = 0; elt < batch; elt++)
  {
    // Scratch distance matrix for this batch element (num_key x num_query).
    at::Tensor b_dist = at::empty({num_key, num_query}, query.options());
    AT_DISPATCH_FLOATING_TYPES(query.type(), "ComputeDistance", ([&] {
      scalar_t * key_dev = key[elt].data<scalar_t>();
      scalar_t * query_dev = query[elt].data<scalar_t>();
      scalar_t * dist_dev = b_dist.data<scalar_t>();
      long * ind_dev = ind[elt].data<long>();

      // Grids and threads: ceil-divide so partial tiles are covered.
      dim3 g_16x16(num_query / BLOCK_DIM, num_key / BLOCK_DIM, 1);
      dim3 t_16x16(BLOCK_DIM, BLOCK_DIM, 1);
      if (num_query % BLOCK_DIM != 0) g_16x16.x += 1;
      if (num_key % BLOCK_DIM != 0) g_16x16.y += 1;
      //
      dim3 g_256x1(num_query / 256, 1, 1);
      dim3 t_256x1(256, 1, 1);
      if (num_query%256 != 0) g_256x1.x += 1;

      // NOTE(review): dead configuration — intended for cuParallelSqrt,
      // which is never launched here.
      dim3 g_k_16x16(num_query / BLOCK_DIM, k / BLOCK_DIM, 1);
      dim3 t_k_16x16(BLOCK_DIM, BLOCK_DIM, 1);
      if (num_query % BLOCK_DIM != 0) g_k_16x16.x += 1;
      if (k % BLOCK_DIM != 0) g_k_16x16.y += 1;

      hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

      // Kernel 1: Compute all the distances
      hipLaunchKernelGGL(( cuComputeDistanceGlobal), dim3(g_16x16), dim3(t_16x16), 0, stream,
        key_dev, num_key, query_dev, num_query, dim, dist_dev);

      // Kernel 2: Sort each column
      hipLaunchKernelGGL(( cuInsertionSort), dim3(g_256x1), dim3(t_256x1), 0, stream,
        dist_dev, ind_dev, num_query, num_key, k);
    }));
    // Keep the k smallest rows, transposed to (num_query, k).
    dist[elt] = b_dist.slice(0, 0, k).transpose(0, 1);
  }
  THCudaCheck(hipGetLastError());

  // Indices were stored 1-based in cuInsertionSort; convert to 0-based here.
  return std::vector<at::Tensor>({ind.transpose(1, 2)-1, dist});
}

#endif
9e93d8b9ca6a0ba1c9870017e8e93d06615f2b54.cu
/* CUDA Implementation for knn query*/
// Brute-force k-nearest-neighbour (Garcia-style kNN): full distance matrix +
// per-column partial insertion sort. This is the CUDA original of the
// hipify-generated .hip twin earlier in this dataset.
#ifndef _KNN_QUERY_KERNEL
#define _KNN_QUERY_KERNEL

#include <cmath>
#include <vector>
#include <cstdio>

#include "cuda.h"
#include <ATen/ATen.h>
#include <THC/THC.h>   // provides THCudaCheck

// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4.
#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Constants used by the program
#define BLOCK_DIM 16   // tile edge for the distance kernel (16x16 thread blocks)
#define DEBUG 0

/**
 * Computes the squared Euclidean distance between two matrices A (reference
 * points) and B (query points) containing respectively wA and wB points.
 * Tiled shared-memory algorithm: each 16x16 block accumulates partial sums of
 * squared differences over the `dim` axis. Launch with BLOCK_DIM x BLOCK_DIM
 * threads and a ceil(wB/16) x ceil(wA/16) grid.
 *
 * @param A    pointer on the matrix A (dim rows x wA cols, row-major)
 * @param wA   width of the matrix A = number of points in A
 * @param B    pointer on the matrix B (dim rows x wB cols, row-major)
 * @param wB   width of the matrix B = number of points in B
 * @param dim  dimension of points = height of matrices A and B
 * @param AB   pointer on the wA x wB output matrix of squared distances
 */
template <typename scalar_t>
__global__ void cuComputeDistanceGlobal( scalar_t* A, int wA,
    scalar_t* B, int wB, int dim, scalar_t* AB){

  // Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
  __shared__ scalar_t shared_A[BLOCK_DIM][BLOCK_DIM];
  __shared__ scalar_t shared_B[BLOCK_DIM][BLOCK_DIM];

  // Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
  // NOTE(review): every thread writes the same value into these __shared__
  // scalars below, so the unsynchronized writes are benign by uniformity.
  __shared__ int begin_A;
  __shared__ int begin_B;
  __shared__ int step_A;
  __shared__ int step_B;
  __shared__ int end_A;

  // Thread index
  int tx = threadIdx.x;
  int ty = threadIdx.y;

  // Other variables
  scalar_t tmp;
  scalar_t ssd = 0;   // running sum of squared differences for this (A,B) pair

  // Loop parameters
  begin_A = BLOCK_DIM * blockIdx.y;
  begin_B = BLOCK_DIM * blockIdx.x;
  step_A  = BLOCK_DIM * wA;
  step_B  = BLOCK_DIM * wB;
  end_A   = begin_A + (dim-1) * wA;

  // Conditions
  int cond0 = (begin_A + tx < wA); // used to write in shared memory
  int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
  int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix

  // Loop over all the sub-matrices of A and B required to compute the block sub-matrix
  for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
    // Load the matrices from device memory to shared memory; each thread loads one element of each matrix
    // Out-of-range tiles are zero-filled so they contribute nothing to ssd.
    if (a/wA + ty < dim){
      shared_A[ty][tx] = (cond0)? A[a + wA * ty + tx] : 0;
      shared_B[ty][tx] = (cond1)? B[b + wB * ty + tx] : 0;
    }
    else{
      shared_A[ty][tx] = 0;
      shared_B[ty][tx] = 0;
    }

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
    if (cond2 && cond1){
      for (int k = 0; k < BLOCK_DIM; ++k){
        tmp = shared_A[k][ty] - shared_B[k][tx];
        ssd += tmp*tmp;
      }
    }

    // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Write the block sub-matrix to device memory; each thread writes one element
  if (cond2 && cond1)
    AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}

/**
 * Gathers the k smallest distances for each column of the distance matrix
 * into the top k rows, by partial insertion sort. One thread handles one
 * column (one query point).
 *
 * Indices written into `ind` are 1-based (l + 1); the host-side knn() below
 * subtracts 1 before returning them.
 *
 * @param dist    distance matrix (height x width, column-per-query)
 * @param ind     index matrix, same layout as dist
 * @param width   width of the distance matrix and of the index matrix
 * @param height  height of the distance matrix and of the index matrix
 * @param k       number of neighbors to consider
 */
template <typename scalar_t>
__global__ void cuInsertionSort( scalar_t *dist, long *ind, int width, int height, int k){

  // Variables
  int l, i, j;
  scalar_t *p_dist;
  long *p_ind;
  scalar_t curr_dist, max_dist;
  long curr_row, max_row;
  unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;

  if (xIndex<width){
    // Pointer shift, initialization, and max value
    p_dist = dist + xIndex;
    p_ind = ind + xIndex;
    max_dist = p_dist[0];
    p_ind[0] = 1;   // 1-based index of row 0

    // Part 1: insertion-sort the first k elements of the column
    for (l=1; l<k; l++){
      curr_row = l * width;
      curr_dist = p_dist[curr_row];
      if (curr_dist<max_dist){
        i=l-1;
        // find insertion position i among the already-sorted prefix
        for (int a=0; a<l-1; a++){
          if (p_dist[a*width]>curr_dist){
            i=a;
            break;
          }
        }
        // shift [i, l) down one row, then insert
        for (j=l; j>i; j--){
          p_dist[j*width] = p_dist[(j-1)*width];
          p_ind[j*width] = p_ind[(j-1)*width];
        }
        p_dist[i*width] = curr_dist;
        p_ind[i*width] = l + 1;
      } else {
        p_ind[l*width] = l + 1;
      }
      max_dist = p_dist[curr_row];
    }

    // Part 2: insert each remaining element into the k first lines if it
    // beats the current k-th smallest (max_dist)
    max_row = (k-1)*width;
    for (l=k; l<height; l++){
      curr_dist = p_dist[l*width];
      if (curr_dist<max_dist){
        i=k-1;
        for (int a=0; a<k-1; a++){
          if (p_dist[a*width]>curr_dist){
            i=a;
            break;
          }
        }
        for (j=k-1; j>i; j--){
          p_dist[j*width] = p_dist[(j-1)*width];
          p_ind[j*width] = p_ind[(j-1)*width];
        }
        p_dist[i*width] = curr_dist;
        p_ind[i*width] = l + 1;
        max_dist = p_dist[max_row];
      }
    }
  }
}

/**
 * Computes the square root of the first k lines (width elements each)
 * of the distance matrix.
 *
 * NOTE(review): this kernel is never launched by knn() below (its grid
 * g_k_16x16 is computed but unused), so knn() returns squared distances —
 * confirm whether that is intentional.
 *
 * @param dist    distance matrix
 * @param width   width of the distance matrix
 * @param k       number of neighbors to consider
 */
template <typename scalar_t>
__global__ void cuParallelSqrt(scalar_t *dist, int width, int k){
  unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
  if (xIndex<width && yIndex<k)
    dist[yIndex*width + xIndex] = sqrt(dist[yIndex*width + xIndex]);
}

//-----------------------------------------------------------------------------------------------//
//                                   K-th NEAREST NEIGHBORS                                      //
//-----------------------------------------------------------------------------------------------//

/**
 * K nearest neighbor query: for every query point, find the k nearest
 * reference (key) points under (squared) Euclidean distance.
 *
 * Expects `query` and `key` as CUDA tensors of shape (batch, dim, n) — points
 * stored column-wise. For each batch element it computes the full
 * num_key x num_query distance matrix, then partially sorts each column.
 *
 * @param query  query points, shape (batch, dim, num_query); must be contiguous CUDA
 * @param key    reference points, shape (batch, dim, num_key); must be contiguous CUDA
 * @param k      number of neighbors to consider
 * @return {indices (batch, num_query, k), 0-based longs;
 *          distances (batch, num_query, k) — squared, see cuParallelSqrt note}
 *
 * NOTE(review): uses pre-1.0 ATen APIs (tensor.type() dispatch, data<T>());
 * CHECK_EQ is presumed to come from a project/glog header — confirm.
 */
std::vector<at::Tensor> knn(
  at::Tensor & query,
  at::Tensor & key,
  const int k
){
  CHECK_CONTIGUOUS(key); CHECK_CUDA(key);
  CHECK_CONTIGUOUS(query); CHECK_CUDA(query);
  long batch = key.size(0);
  long dim = key.size(1);
  CHECK_EQ(batch, query.size(0));
  CHECK_EQ(dim, query.size(1));
  long num_key = key.size(2);
  long num_query = query.size(2);

  at::Tensor dist = at::empty({batch, num_query, k}, query.options());
  at::Tensor ind = at::empty({batch, k, num_query}, query.options().dtype(at::kLong));

  // Per-batch loop: kernels are launched once per batch element.
  for (int elt = 0; elt < batch; elt++)
  {
    // Scratch distance matrix for this batch element (num_key x num_query).
    at::Tensor b_dist = at::empty({num_key, num_query}, query.options());
    AT_DISPATCH_FLOATING_TYPES(query.type(), "ComputeDistance", ([&] {
      scalar_t * key_dev = key[elt].data<scalar_t>();
      scalar_t * query_dev = query[elt].data<scalar_t>();
      scalar_t * dist_dev = b_dist.data<scalar_t>();
      long * ind_dev = ind[elt].data<long>();

      // Grids and threads: ceil-divide so partial tiles are covered.
      dim3 g_16x16(num_query / BLOCK_DIM, num_key / BLOCK_DIM, 1);
      dim3 t_16x16(BLOCK_DIM, BLOCK_DIM, 1);
      if (num_query % BLOCK_DIM != 0) g_16x16.x += 1;
      if (num_key % BLOCK_DIM != 0) g_16x16.y += 1;
      //
      dim3 g_256x1(num_query / 256, 1, 1);
      dim3 t_256x1(256, 1, 1);
      if (num_query%256 != 0) g_256x1.x += 1;

      // NOTE(review): dead configuration — intended for cuParallelSqrt,
      // which is never launched here.
      dim3 g_k_16x16(num_query / BLOCK_DIM, k / BLOCK_DIM, 1);
      dim3 t_k_16x16(BLOCK_DIM, BLOCK_DIM, 1);
      if (num_query % BLOCK_DIM != 0) g_k_16x16.x += 1;
      if (k % BLOCK_DIM != 0) g_k_16x16.y += 1;

      cudaStream_t stream = at::cuda::getCurrentCUDAStream();

      // Kernel 1: Compute all the distances
      cuComputeDistanceGlobal<<<g_16x16, t_16x16, 0, stream>>>(
        key_dev, num_key, query_dev, num_query, dim, dist_dev);

      // Kernel 2: Sort each column
      cuInsertionSort<<<g_256x1, t_256x1, 0, stream>>>(
        dist_dev, ind_dev, num_query, num_key, k);
    }));
    // Keep the k smallest rows, transposed to (num_query, k).
    dist[elt] = b_dist.slice(0, 0, k).transpose(0, 1);
  }
  THCudaCheck(cudaGetLastError());

  // Indices were stored 1-based in cuInsertionSort; convert to 0-based here.
  return std::vector<at::Tensor>({ind.transpose(1, 2)-1, dist});
}

#endif
947ccaad7f1801f36567eb92adf1e21491bec9e8.hip
// !!! This is a file automatically generated by hipify!!!
// HIP translation of HicooTensor member functions: host/device buffer
// management plus several MTTKRP variants (CPU reference, naive GPU, and
// tuned kernels). Types HicooTensor/HicooPoint/HicooBlock/DenseMatrix[Manager]
// and DEBUG_PRINT/cudaErrorCheck come from the project headers below.
#include "hip/hip_runtime.h"
#include "hicoo.hpp"
#include "coo.hpp"

// Release both host- and device-side point/block arrays.
void HicooTensor::freeAllArrays() {
    DEBUG_PRINT("HT: free all arrays\n");
    freeHostArrays();
    freeDeviceArrays();
}

// Free host arrays and null the pointers (free(nullptr) is a no-op).
void HicooTensor::freeHostArrays() {
    DEBUG_PRINT("HT: free host arrays\n");
    DEBUG_PRINT(" - points_h = %p\n", points_h);
    DEBUG_PRINT(" - blocks_h = %p\n", blocks_h);
    free(points_h);
    free(blocks_h);
    points_h = nullptr;
    blocks_h = nullptr;
}

// Free device arrays and null the pointers.
void HicooTensor::freeDeviceArrays() {
    DEBUG_PRINT("HT: free device arrays\n");
    DEBUG_PRINT(" - points_d = %p\n", points_d);
    DEBUG_PRINT(" - blocks_d = %p\n", blocks_d);
    if(points_d != nullptr) // Because the docs lie: "If devPtr is 0, no operation is performed."
        cudaErrorCheck(hipFree(points_d));
    if(blocks_d != nullptr) // Because the docs lie: "If devPtr is 0, no operation is performed."
        cudaErrorCheck(hipFree(blocks_d));
    points_d = nullptr;
    blocks_d = nullptr;
}

// Copy host arrays to fresh device allocations (numBlocks+1 blocks: the extra
// sentinel block carries the end address used by the kernels below).
void HicooTensor::uploadToDevice() {
    DEBUG_PRINT("HT: upload to device\n");
    assert(points_h != nullptr);
    assert(blocks_h != nullptr);
    freeDeviceArrays();

    cudaErrorCheck(hipMalloc((void **) &points_d, sizeof(HicooPoint) * numPoints));
    assert(points_d != nullptr);
    cudaErrorCheck(hipMemcpy(points_d, points_h, sizeof(HicooPoint) * numPoints, hipMemcpyHostToDevice));

    cudaErrorCheck(hipMalloc((void **) &blocks_d, sizeof(HicooBlock) * (numBlocks+1)));
    assert(blocks_d != nullptr);
    cudaErrorCheck(hipMemcpy(blocks_d, blocks_h, sizeof(HicooBlock) * (numBlocks+1), hipMemcpyHostToDevice));
}

// Copy device arrays back to fresh host allocations.
void HicooTensor::downloadToHost() {
    DEBUG_PRINT("HT: download to host\n");
    assert(points_d != nullptr);
    assert(blocks_d != nullptr);
    freeHostArrays();

    points_h = (HicooPoint*)malloc(sizeof(HicooPoint) * numPoints);
    assert(points_h != nullptr);
    cudaErrorCheck(hipMemcpy(points_h, points_d, sizeof(HicooPoint) * numPoints, hipMemcpyDeviceToHost));

    blocks_h = (HicooBlock*)malloc(sizeof(HicooBlock) * (numBlocks+1));
    assert(blocks_h != nullptr);
    cudaErrorCheck(hipMemcpy(blocks_h, blocks_d, sizeof(HicooBlock) * (numBlocks+1), hipMemcpyDeviceToHost));
}

// Conversion to COO format — unimplemented (asserts unconditionally).
CooTensorManager HicooTensor::toCoo() {
    DEBUG_PRINT("HT: to coo\n");
    CooTensorManager ret;

    assert(0); // TODO
    return ret;
}

// CPU reference MTTKRP: A(i,j) = sum over nonzeros B(i,k,l) * D(l,j) * C(k,j).
DenseMatrixManager HicooTensor::mttkrp_naive_cpu(DenseMatrixManager D, DenseMatrixManager C) {
    /*
     * for each block (except the last)
     *   for each element starting at block address and ending at next block address
     *     l = blockX * blockWidth + pointX
     *     k = blockY * blockHeight + pointY
     *     i = blockZ * blockDepth + pointZ
     *
     *     for j = 1..j
     *       A(i,j) += point.val * C(k,j) * D(l,j)
     * return A
     */
    DenseMatrixManager ret;
    DenseMatrix& a = ret;
    DenseMatrix& c = C;
    DenseMatrix& d = D;
    assert(points_h != nullptr);
    assert(blocks_h != nullptr);
    assert(c.values_h != nullptr);
    assert(d.values_h != nullptr);

    //Naive: each thread is a non-zero
    //optimization: each thread does a few R's

    //Naive implementation:
    DEBUG_PRINT("HICOO: mttkrp naive cpu\n");

    // A(i,j) = B(i,k,l) * D(l,j) * C(k,j);
    int I = this->depth, J = d.width, K = this->height, L = this->width;
    DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L);
    assert(d.height == L);
    assert(c.height == K);
    assert(c.width == J);
    a.setSize(I, J);

    //for each non-zero
    DEBUG_PRINT(" - performing operation\n");
    for (int b = 0; b < this->numBlocks; b++) {
        HicooBlock block = this->access_block(b);
        unsigned long long startBlockAddress = block.blockAddress;
        // sentinel block at numBlocks supplies the end address
        unsigned long long endBlockAddress = this->access_block(b+1).blockAddress;
        for (unsigned long long index = startBlockAddress; index < endBlockAddress; index++) {
            HicooPoint point = access_point(index);
            // reconstruct global coordinates from block origin + local offset
            int l = block.blockX * this->blockWidth + point.x;
            int k = block.blockY * this->blockHeight + point.y;
            int i = block.blockZ * this->blockDepth + point.z;
            for (int j = 0; j < J; j++) {
                a.access(i,j) += point.value * d.access(l,j) * c.access(k,j);
            }
        }
    }

    return ret;
}

// Forward declaration: kernels cannot be class members.
__global__ void mttkrp_naive_gpu_kernel(HicooTensor hicooTensor, DenseMatrix d, DenseMatrix c, DenseMatrix ret);

//wrapper function for the sake of convenience
// Naive GPU MTTKRP: one thread per HiCOO block, 64 threads per CUDA block.
DenseMatrixManager HicooTensor::mttkrp_naive_gpu(DenseMatrixManager D, DenseMatrixManager C) {
    DEBUG_PRINT("HT: naive mttkrp gpu\n");
    this->uploadToDevice();

    DenseMatrixManager ret;
    DenseMatrix& a = ret;
    DenseMatrix& c = C;
    DenseMatrix& d = D;
    assert(points_d != nullptr);
    assert(blocks_d != nullptr);
    assert(c.values_d != nullptr);
    assert(d.values_d != nullptr);

    // A(i,j) = B(i,k,l) * D(l,j) * C(k,j);
    int I = this->depth, J = d.width, K = this->height, L = this->width;
    DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L);
    assert(d.height == L);
    assert(c.height == K);
    assert(c.width == J);
    DEBUG_PRINT(" - setSize_d\n");
    a.setSize_d(I, J);

    //todo: split up the blocks & blocks per threads appropriately
    // NOTE(review): neither the launch nor hipDeviceSynchronize() is
    // error-checked here — confirm whether cudaErrorCheck should wrap them.
    hipLaunchKernelGGL(( mttkrp_naive_gpu_kernel), dim3(ceil(this->numBlocks/64.0)), dim3(64), 0, 0, *this, d, c, ret);
    hipDeviceSynchronize();

    ret.tensor->tensor.downloadToHost();
    DEBUG_PRINT(" - done\n");
    return ret;
}

//Not declared as part of the class... Cuda doesn't like its kernels as part of OOP
// One thread per HiCOO block; accumulates into A with atomicAdd because
// different blocks may touch the same output row i.
__global__ void mttkrp_naive_gpu_kernel(HicooTensor hicooTensor, DenseMatrix d, DenseMatrix c, DenseMatrix ret) {
    /*
     * for each block (except the last)
     *   for each element starting at block address and ending at next block address
     *     l = blockX * blockWidth + pointX
     *     k = blockY * blockHeight + pointY
     *     i = blockZ * blockDepth + pointZ
     *
     *     for j = 1..j
     *       A(i,j) += point.val * C(k,j) * D(l,j)
     * return A
     */
    DenseMatrix& a = ret;

    //Naive: each thread is a block
    unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < hicooTensor.numBlocks) {
        // A(i,j) = B(i,k,l) * D(l,j) * C(k,j);
        int J = d.width;// K = hicooTensor.height, L = hicooTensor.width, I = hicooTensor.depth

        //each thread gets a block
        HicooBlock block = hicooTensor.access_block(index);
        unsigned long long startBlockAddress = block.blockAddress;
        unsigned long long endBlockAddress = hicooTensor.access_block(index + 1).blockAddress;

        // NOTE(review): this inner `index` shadows the thread index above.
        for (unsigned long long index = startBlockAddress; index < endBlockAddress; index++) {
            HicooPoint point = hicooTensor.access_point(index);
            int l = block.blockX * hicooTensor.blockWidth + point.x;
            int k = block.blockY * hicooTensor.blockHeight + point.y;
            int i = block.blockZ * hicooTensor.blockDepth + point.z;
            for (int j = 0; j < J; j++) {
                float val = point.value * d.access(l, j) * c.access(k, j);
                atomicAdd(&a.access(i,j), val);
            }
        }
    }
}

// One CUDA block per HiCOO block; 128 threads stride across the J columns.
__global__ void hicoo_james1_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c) {
    //launched with 128 threads per block

    //Start of current HiCOO block
    HicooBlock& ba = b.access_block(blockIdx.x);
    //Start of subsequent block
    HicooBlock& bb = b.access_block(blockIdx.x+1);

    // global-coordinate origin of this HiCOO block
    unsigned int bx = ba.blockX * b.blockWidth;
    unsigned int by = ba.blockY * b.blockHeight;
    unsigned int bz = ba.blockZ * b.blockDepth;

    for(int e = ba.blockAddress; e < bb.blockAddress; e++) {
        //For every HiCOOPoint in the HiCOO block
        HicooPoint& p = b.access_point(e);

        for(int j = threadIdx.x; j < a.width; j+=128) {
            float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j);
            atomicAdd(&a.access(p.z+bz, j), val);
        }
    }
}

// Host wrapper for hicoo_james1_kernel: one CUDA block per HiCOO block.
// NOTE(review): unlike mttkrp_naive_gpu, there is no explicit synchronize or
// error check between the launch and downloadToHost() — confirm that
// a.downloadToHost()'s copy provides the needed ordering.
DenseMatrixManager HicooTensor::mttkrp_james1(DenseMatrixManager D, DenseMatrixManager C) {
    DEBUG_PRINT("HT: mttkrp james1\n");
    DEBUG_PRINT(" - asserts, initialization\n");
    DenseMatrixManager ret;
    DenseMatrix& a = ret;
    DenseMatrix& c = C;
    DenseMatrix& d = D;
    assert(points_d != nullptr);
    assert(blocks_d != nullptr);
    assert(c.values_d != nullptr);
    assert(d.values_d != nullptr);

    // A(i,j) = B(i,k,l) * D(l,j) * C(k,j);
    int I = this->depth, J = d.width, K = this->height, L = this->width;
    DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L);
    assert(d.height == L);
    assert(c.height == K);
    assert(c.width == J);

    DEBUG_PRINT(" - malloc output matrix\n");
    a.setSize_d(I, J);

    DEBUG_PRINT(" - do compute on gpu\n");
    hipLaunchKernelGGL(( hicoo_james1_kernel), dim3(numBlocks), dim3(128), 0, 0, a, *this, d, c);

    DEBUG_PRINT(" - downloading to host\n");
    a.downloadToHost();

    DEBUG_PRINT(" - done\n");
    return ret;
}

// Like hicoo_james1_kernel but with a manually 4x-unrolled column loop plus a
// cleanup loop for the remainder.
__global__ void hicoo_collab1_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c) {
    //launched with 128 threads per block

    //Start of current HiCOO block
    HicooBlock& ba = b.access_block(blockIdx.x);
    //Start of subsequent block
    HicooBlock& bb = b.access_block(blockIdx.x+1);

    unsigned int bx = ba.blockX * b.blockWidth;
    unsigned int by = ba.blockY * b.blockHeight;
    unsigned int bz = ba.blockZ * b.blockDepth;

    for(int e = ba.blockAddress; e < bb.blockAddress; e++) {
        //For every HiCOOPoint in the HiCOO block
        HicooPoint& p = b.access_point(e);

        int j;
        for (j = threadIdx.x; j <= a.width - blockDim.x*4 ; j += blockDim.x*4) {
            //4x unroll
            float val1 = p.value * d.access(p.x+bx,j + blockDim.x*0) * c.access(p.y+by,j + blockDim.x*0);
            float val2 = p.value * d.access(p.x+bx,j + blockDim.x*1) * c.access(p.y+by,j + blockDim.x*1);
            float val3 = p.value * d.access(p.x+bx,j + blockDim.x*2) * c.access(p.y+by,j + blockDim.x*2);
            float val4 = p.value * d.access(p.x+bx,j + blockDim.x*3) * c.access(p.y+by,j + blockDim.x*3);
            atomicAdd(&a.access(p.z+bz, j + blockDim.x*0), val1);
            atomicAdd(&a.access(p.z+bz, j + blockDim.x*1), val2);
            atomicAdd(&a.access(p.z+bz, j + blockDim.x*2), val3);
            atomicAdd(&a.access(p.z+bz, j + blockDim.x*3), val4);
        }

        //finish what the unrolling couldn't
        for(/*continue j*/; j < a.width; j+=blockDim.x) {
            float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j);
            atomicAdd(&a.access(p.z+bz, j), val);
        }
    }
}

// Host wrapper for hicoo_collab1_kernel; same launch/structure as mttkrp_james1.
DenseMatrixManager HicooTensor::mttkrp_collab1(DenseMatrixManager D, DenseMatrixManager C) {
    DEBUG_PRINT("HT: mttkrp collab1\n");
    DEBUG_PRINT(" - asserts, initialization\n");
    DenseMatrixManager ret;
    DenseMatrix& a = ret;
    DenseMatrix& c = C;
    DenseMatrix& d = D;
    assert(points_d != nullptr);
    assert(blocks_d != nullptr);
    assert(c.values_d != nullptr);
    assert(d.values_d != nullptr);

    // A(i,j) = B(i,k,l) * D(l,j) * C(k,j);
    int I = this->depth, J = d.width, K = this->height, L = this->width;
    DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L);
    assert(d.height == L);
    assert(c.height == K);
    assert(c.width == J);

    DEBUG_PRINT(" - malloc output matrix\n");
    a.setSize_d(I, J);

    DEBUG_PRINT(" - do compute on gpu\n");
    hipLaunchKernelGGL(( hicoo_collab1_kernel), dim3(numBlocks), dim3(128), 0, 0, a, *this, d, c);

    DEBUG_PRINT(" - downloading to host\n");
    a.downloadToHost();

    DEBUG_PRINT(" - done\n");
    return ret;
}

// Variant of the per-block kernel with a stride-32 column loop
// (NOTE(review): stride is hard-coded to 32 rather than blockDim.x — the
// wrapper for this kernel is past the end of this chunk; confirm launch width).
__global__ void hicoo_kevin1_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c) {
    HicooBlock& ba = b.access_block(blockIdx.x);
    HicooBlock& bb = b.access_block(blockIdx.x+1);

    unsigned int bx = ba.blockX * b.blockWidth;
    unsigned int by = ba.blockY * b.blockHeight;
    unsigned int bz = ba.blockZ * b.blockDepth;

    // A(i,j) = B(i,k,l) * D(l,j) * C(k,j);
    for(int e = ba.blockAddress; e < bb.blockAddress; e++) {
        HicooPoint p = b.access_point(e);

        for(int j = threadIdx.x; j < a.width; j+=32) {
            float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j);
            atomicAdd(&a.access(p.z+bz, j), val);
        }
    }
}
DenseMatrixManager HicooTensor::mttkrp_kevin1(DenseMatrixManager D, DenseMatrixManager C) { // Has each thread block mapped to a hicoo block (parallelizing blocks across J) DEBUG_PRINT("HT: mttkrp kevin1\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - malloc output matrix\n"); a.setSize_d(I, J); DEBUG_PRINT(" - do compute on gpu\n"); hipLaunchKernelGGL(( hicoo_kevin1_kernel), dim3(numBlocks), dim3(32), 0, 0, a, *this, d, c); DEBUG_PRINT(" - done\n"); return ret; } __global__ void hicoo_kevin2_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c, int* lut) { int myBlockZ = blockIdx.x; int bi = lut[myBlockZ]; HicooBlock ba = b.access_block(bi); while(ba.blockZ == myBlockZ && bi < b.numBlocks) { // go through each block in this blockZ HicooBlock bb = b.access_block(bi+1); unsigned int bx = ba.blockX * b.blockWidth; unsigned int by = ba.blockY * b.blockHeight; unsigned int bz = ba.blockZ * b.blockDepth; // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); for(int e = ba.blockAddress; e < bb.blockAddress; e++) { HicooPoint p = b.access_point(e); for(int j = threadIdx.x; j < a.width; j+=32) { // because each cuda block is only accessing one blockZ, we don't have to atomicAdd() float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j); a.access(p.z+bz, j) += val; //atomicAdd(&a.access(p.z+bz, j), val); } } ba = bb; bi++; } } __global__ void hicoo_kevin2_lut_populate(HicooTensor b, int* lut) { // build a lookup table of where each blockZ starts int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < b.numBlocks 
&& idx > 0) { HicooBlock prev = b.access_block(idx-1); HicooBlock curr = b.access_block(idx); if(prev.blockZ != curr.blockZ) { // only write if we're on a boundary // since the list is sorted, this won't have any race conditions lut[curr.blockZ] = idx; } } } DenseMatrixManager HicooTensor::mttkrp_kevin2(DenseMatrixManager D, DenseMatrixManager C) { // Avoid atomicAdd() by assigning each blockZ to a cuda block // Has each thread block mapped to a hicoo block (parallelizing blocks across J) DEBUG_PRINT("HT: mttkrp kevin2\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); assert(sorting == ZYX); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - malloc output matrix\n"); a.setSize_d(I, J); //if(depth > 20000000) { // printf("Skipping execution, because this likes to OOM or something\n"); // return ret; //} DEBUG_PRINT(" - create LUT on gpu\n"); int blocksZ = (width-1)/blockDepth + 1; int* zBlockIndices; cudaErrorCheck(hipMalloc((void **) &zBlockIndices, sizeof(int) * blocksZ)); assert(zBlockIndices != nullptr); cudaErrorCheck(hipMemset(zBlockIndices, 0, blocksZ * sizeof(int))); DEBUG_PRINT(" - populate LUT on gpu\n"); hipLaunchKernelGGL(( hicoo_kevin2_lut_populate), dim3((numBlocks-1)/32+1), dim3(32), 0, 0, *this, zBlockIndices); DEBUG_PRINT(" - do compute on gpu\n"); hipLaunchKernelGGL(( hicoo_kevin2_kernel), dim3(blocksZ), dim3(32), 0, 0, a, *this, d, c, zBlockIndices); DEBUG_PRINT(" - Freeing LUT\n"); cudaErrorCheck(hipFree(zBlockIndices)); DEBUG_PRINT(" - done\n"); return ret; } __global__ void hicoo_kevin3_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, 
DenseMatrix c) { int bi = blockIdx.x; // check that we are in the first block of this given blockZ (will often fail) // effecient if depth is very sparse; ineffecient otherwise HicooBlock ba = b.access_block(bi); if(bi > 0 && ba.blockZ == b.access_block(bi-1).blockZ) { // this makes a bit more sense when demorganized: // continue only if bi==0, or if blockZ different from previous return; } int blockZ = ba.blockZ; while(ba.blockZ == blockZ && bi < b.numBlocks) { // go through each block in this blockZ HicooBlock bb = b.access_block(bi+1); unsigned int bx = ba.blockX * b.blockWidth; unsigned int by = ba.blockY * b.blockHeight; unsigned int bz = ba.blockZ * b.blockDepth; // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); for(int e = ba.blockAddress; e < bb.blockAddress; e++) { HicooPoint p = b.access_point(e); for(int j = threadIdx.x; j < a.width; j+=32) { // because each cuda block is only accessing one blockZ, we don't have to atomicAdd() float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j); a.access(p.z+bz, j) += val; //atomicAdd(&a.access(p.z+bz, j), val); } } ba = bb; bi++; } } DenseMatrixManager HicooTensor::mttkrp_kevin3(DenseMatrixManager D, DenseMatrixManager C) { // kevin2 but skip the LUT by pushing the essence of it into the kernel (still no atomicAdd) // effecient if depth is very sparse; ineffecient otherwise DEBUG_PRINT("HT: mttkrp kevin3\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); assert(sorting == ZYX); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - malloc output matrix\n"); a.setSize_d(I, J); DEBUG_PRINT(" - do compute on gpu\n"); 
hipLaunchKernelGGL(( hicoo_kevin3_kernel), dim3(numBlocks), dim3(32), 0, 0, a, *this, d, c); DEBUG_PRINT(" - done\n"); return ret; }
947ccaad7f1801f36567eb92adf1e21491bec9e8.cu
#include "hicoo.hpp" #include "coo.hpp" void HicooTensor::freeAllArrays() { DEBUG_PRINT("HT: free all arrays\n"); freeHostArrays(); freeDeviceArrays(); } void HicooTensor::freeHostArrays() { DEBUG_PRINT("HT: free host arrays\n"); DEBUG_PRINT(" - points_h = %p\n", points_h); DEBUG_PRINT(" - blocks_h = %p\n", blocks_h); free(points_h); free(blocks_h); points_h = nullptr; blocks_h = nullptr; } void HicooTensor::freeDeviceArrays() { DEBUG_PRINT("HT: free device arrays\n"); DEBUG_PRINT(" - points_d = %p\n", points_d); DEBUG_PRINT(" - blocks_d = %p\n", blocks_d); if(points_d != nullptr) // Because the docs lie: "If devPtr is 0, no operation is performed." cudaErrorCheck(cudaFree(points_d)); if(blocks_d != nullptr) // Because the docs lie: "If devPtr is 0, no operation is performed." cudaErrorCheck(cudaFree(blocks_d)); points_d = nullptr; blocks_d = nullptr; } void HicooTensor::uploadToDevice() { DEBUG_PRINT("HT: upload to device\n"); assert(points_h != nullptr); assert(blocks_h != nullptr); freeDeviceArrays(); cudaErrorCheck(cudaMalloc((void **) &points_d, sizeof(HicooPoint) * numPoints)); assert(points_d != nullptr); cudaErrorCheck(cudaMemcpy(points_d, points_h, sizeof(HicooPoint) * numPoints, cudaMemcpyHostToDevice)); cudaErrorCheck(cudaMalloc((void **) &blocks_d, sizeof(HicooBlock) * (numBlocks+1))); assert(blocks_d != nullptr); cudaErrorCheck(cudaMemcpy(blocks_d, blocks_h, sizeof(HicooBlock) * (numBlocks+1), cudaMemcpyHostToDevice)); } void HicooTensor::downloadToHost() { DEBUG_PRINT("HT: download to host\n"); assert(points_d != nullptr); assert(blocks_d != nullptr); freeHostArrays(); points_h = (HicooPoint*)malloc(sizeof(HicooPoint) * numPoints); assert(points_h != nullptr); cudaErrorCheck(cudaMemcpy(points_h, points_d, sizeof(HicooPoint) * numPoints, cudaMemcpyDeviceToHost)); blocks_h = (HicooBlock*)malloc(sizeof(HicooBlock) * (numBlocks+1)); assert(blocks_h != nullptr); cudaErrorCheck(cudaMemcpy(blocks_h, blocks_d, sizeof(HicooBlock) * (numBlocks+1), 
cudaMemcpyDeviceToHost)); } CooTensorManager HicooTensor::toCoo() { DEBUG_PRINT("HT: to coo\n"); CooTensorManager ret; assert(0); // TODO return ret; } DenseMatrixManager HicooTensor::mttkrp_naive_cpu(DenseMatrixManager D, DenseMatrixManager C) { /* * for each block (except the last) * for each element starting at block address and ending at next block address * l = blockX * blockWidth + pointX * k = blockY * blockHeight + pointY * i = blockZ * blockDepth + pointZ * * for j = 1..j * A(i,j) += point.val * C(k,j) + D(l,j) * return A */ DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_h != nullptr); assert(blocks_h != nullptr); assert(c.values_h != nullptr); assert(d.values_h != nullptr); //Naive: each thread is a non-zero //optimization: each thread does a few R's //Naive implementation: DEBUG_PRINT("HICOO: mttkrp naive cpu\n"); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); a.setSize(I, J); //for each non-zero DEBUG_PRINT(" - performing operation\n"); for (int b = 0; b < this->numBlocks; b++) { HicooBlock block = this->access_block(b); unsigned long long startBlockAddress = block.blockAddress; unsigned long long endBlockAddress = this->access_block(b+1).blockAddress; for (unsigned long long index = startBlockAddress; index < endBlockAddress; index++) { HicooPoint point = access_point(index); int l = block.blockX * this->blockWidth + point.x; int k = block.blockY * this->blockHeight + point.y; int i = block.blockZ * this->blockDepth + point.z; for (int j = 0; j < J; j++) { a.access(i,j) += point.value * d.access(l,j) * c.access(k,j); } } } return ret; } __global__ void mttkrp_naive_gpu_kernel(HicooTensor hicooTensor, DenseMatrix d, DenseMatrix c, DenseMatrix ret); //wrapper function for the sake of convenience 
DenseMatrixManager HicooTensor::mttkrp_naive_gpu(DenseMatrixManager D, DenseMatrixManager C) { DEBUG_PRINT("HT: naive mttkrp gpu\n"); this->uploadToDevice(); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - setSize_d\n"); a.setSize_d(I, J); //todo: split up the blocks & blocks per threads appropriately mttkrp_naive_gpu_kernel<<<ceil(this->numBlocks/64.0), 64>>>(*this, d, c, ret); cudaDeviceSynchronize(); ret.tensor->tensor.downloadToHost(); DEBUG_PRINT(" - done\n"); return ret; } //Not declared as part of the class... Cuda doesn't like it's kernels as part of OOP __global__ void mttkrp_naive_gpu_kernel(HicooTensor hicooTensor, DenseMatrix d, DenseMatrix c, DenseMatrix ret) { /* * for each block (except the last) * for each element starting at block address and ending at next block address * l = blockX * blockWidth + pointX * k = blockY * blockHeight + pointY * i = blockZ * blockDepth + pointZ * * for j = 1..j * A(i,j) += point.val * C(k,j) + D(l,j) * return A */ DenseMatrix& a = ret; //Naive: each thread is a block unsigned int index = blockDim.x * blockIdx.x + threadIdx.x; if(index < hicooTensor.numBlocks) { // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int J = d.width;// K = hicooTensor.height, L = hicooTensor.width, I = hicooTensor.depth //each thread gets a block HicooBlock block = hicooTensor.access_block(index); unsigned long long startBlockAddress = block.blockAddress; unsigned long long endBlockAddress = hicooTensor.access_block(index + 1).blockAddress; for (unsigned long long index = startBlockAddress; index < endBlockAddress; index++) { HicooPoint point = 
hicooTensor.access_point(index); int l = block.blockX * hicooTensor.blockWidth + point.x; int k = block.blockY * hicooTensor.blockHeight + point.y; int i = block.blockZ * hicooTensor.blockDepth + point.z; for (int j = 0; j < J; j++) { float val = point.value * d.access(l, j) * c.access(k, j); atomicAdd(&a.access(i,j), val); } } } } __global__ void hicoo_james1_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c) { //launched with 128 threads per block //Start of current HiCOO block HicooBlock& ba = b.access_block(blockIdx.x); //Start of subsequent block HicooBlock& bb = b.access_block(blockIdx.x+1); unsigned int bx = ba.blockX * b.blockWidth; unsigned int by = ba.blockY * b.blockHeight; unsigned int bz = ba.blockZ * b.blockDepth; for(int e = ba.blockAddress; e < bb.blockAddress; e++) { //For every HiCOOPoint in the HiCOO block HicooPoint& p = b.access_point(e); for(int j = threadIdx.x; j < a.width; j+=128) { float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j); atomicAdd(&a.access(p.z+bz, j), val); } } } DenseMatrixManager HicooTensor::mttkrp_james1(DenseMatrixManager D, DenseMatrixManager C) { DEBUG_PRINT("HT: mttkrp james1\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - malloc output matrix\n"); a.setSize_d(I, J); DEBUG_PRINT(" - do compute on gpu\n"); hicoo_james1_kernel<<<numBlocks, 128>>>(a, *this, d, c); DEBUG_PRINT(" - downloading to host\n"); a.downloadToHost(); DEBUG_PRINT(" - done\n"); return ret; } __global__ void hicoo_collab1_kernel(DenseMatrix a, HicooTensor b, DenseMatrix 
d, DenseMatrix c) { //launched with 128 threads per block //Start of current HiCOO block HicooBlock& ba = b.access_block(blockIdx.x); //Start of subsequent block HicooBlock& bb = b.access_block(blockIdx.x+1); unsigned int bx = ba.blockX * b.blockWidth; unsigned int by = ba.blockY * b.blockHeight; unsigned int bz = ba.blockZ * b.blockDepth; for(int e = ba.blockAddress; e < bb.blockAddress; e++) { //For every HiCOOPoint in the HiCOO block HicooPoint& p = b.access_point(e); int j; for (j = threadIdx.x; j <= a.width - blockDim.x*4 ; j += blockDim.x*4) { //4x unroll float val1 = p.value * d.access(p.x+bx,j + blockDim.x*0) * c.access(p.y+by,j + blockDim.x*0); float val2 = p.value * d.access(p.x+bx,j + blockDim.x*1) * c.access(p.y+by,j + blockDim.x*1); float val3 = p.value * d.access(p.x+bx,j + blockDim.x*2) * c.access(p.y+by,j + blockDim.x*2); float val4 = p.value * d.access(p.x+bx,j + blockDim.x*3) * c.access(p.y+by,j + blockDim.x*3); atomicAdd(&a.access(p.z+bz, j + blockDim.x*0), val1); atomicAdd(&a.access(p.z+bz, j + blockDim.x*1), val2); atomicAdd(&a.access(p.z+bz, j + blockDim.x*2), val3); atomicAdd(&a.access(p.z+bz, j + blockDim.x*3), val4); } //finish what the unrolling couldn't for(/*continue j*/; j < a.width; j+=blockDim.x) { float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j); atomicAdd(&a.access(p.z+bz, j), val); } } } DenseMatrixManager HicooTensor::mttkrp_collab1(DenseMatrixManager D, DenseMatrixManager C) { DEBUG_PRINT("HT: mttkrp collab1\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - 
malloc output matrix\n"); a.setSize_d(I, J); DEBUG_PRINT(" - do compute on gpu\n"); hicoo_collab1_kernel<<<numBlocks, 128>>>(a, *this, d, c); DEBUG_PRINT(" - downloading to host\n"); a.downloadToHost(); DEBUG_PRINT(" - done\n"); return ret; } __global__ void hicoo_kevin1_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c) { HicooBlock& ba = b.access_block(blockIdx.x); HicooBlock& bb = b.access_block(blockIdx.x+1); unsigned int bx = ba.blockX * b.blockWidth; unsigned int by = ba.blockY * b.blockHeight; unsigned int bz = ba.blockZ * b.blockDepth; // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); for(int e = ba.blockAddress; e < bb.blockAddress; e++) { HicooPoint p = b.access_point(e); for(int j = threadIdx.x; j < a.width; j+=32) { float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j); atomicAdd(&a.access(p.z+bz, j), val); } } } DenseMatrixManager HicooTensor::mttkrp_kevin1(DenseMatrixManager D, DenseMatrixManager C) { // Has each thread block mapped to a hicoo block (parallelizing blocks across J) DEBUG_PRINT("HT: mttkrp kevin1\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - malloc output matrix\n"); a.setSize_d(I, J); DEBUG_PRINT(" - do compute on gpu\n"); hicoo_kevin1_kernel<<<numBlocks, 32>>>(a, *this, d, c); DEBUG_PRINT(" - done\n"); return ret; } __global__ void hicoo_kevin2_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c, int* lut) { int myBlockZ = blockIdx.x; int bi = lut[myBlockZ]; HicooBlock ba = b.access_block(bi); while(ba.blockZ == myBlockZ && bi < b.numBlocks) { // go 
through each block in this blockZ HicooBlock bb = b.access_block(bi+1); unsigned int bx = ba.blockX * b.blockWidth; unsigned int by = ba.blockY * b.blockHeight; unsigned int bz = ba.blockZ * b.blockDepth; // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); for(int e = ba.blockAddress; e < bb.blockAddress; e++) { HicooPoint p = b.access_point(e); for(int j = threadIdx.x; j < a.width; j+=32) { // because each cuda block is only accessing one blockZ, we don't have to atomicAdd() float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j); a.access(p.z+bz, j) += val; //atomicAdd(&a.access(p.z+bz, j), val); } } ba = bb; bi++; } } __global__ void hicoo_kevin2_lut_populate(HicooTensor b, int* lut) { // build a lookup table of where each blockZ starts int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < b.numBlocks && idx > 0) { HicooBlock prev = b.access_block(idx-1); HicooBlock curr = b.access_block(idx); if(prev.blockZ != curr.blockZ) { // only write if we're on a boundary // since the list is sorted, this won't have any race conditions lut[curr.blockZ] = idx; } } } DenseMatrixManager HicooTensor::mttkrp_kevin2(DenseMatrixManager D, DenseMatrixManager C) { // Avoid atomicAdd() by assigning each blockZ to a cuda block // Has each thread block mapped to a hicoo block (parallelizing blocks across J) DEBUG_PRINT("HT: mttkrp kevin2\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); assert(sorting == ZYX); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - malloc output matrix\n"); a.setSize_d(I, J); //if(depth > 20000000) { // printf("Skipping execution, because this likes to 
OOM or something\n"); // return ret; //} DEBUG_PRINT(" - create LUT on gpu\n"); int blocksZ = (width-1)/blockDepth + 1; int* zBlockIndices; cudaErrorCheck(cudaMalloc((void **) &zBlockIndices, sizeof(int) * blocksZ)); assert(zBlockIndices != nullptr); cudaErrorCheck(cudaMemset(zBlockIndices, 0, blocksZ * sizeof(int))); DEBUG_PRINT(" - populate LUT on gpu\n"); hicoo_kevin2_lut_populate<<<(numBlocks-1)/32+1, 32>>>(*this, zBlockIndices); DEBUG_PRINT(" - do compute on gpu\n"); hicoo_kevin2_kernel<<<blocksZ, 32>>>(a, *this, d, c, zBlockIndices); DEBUG_PRINT(" - Freeing LUT\n"); cudaErrorCheck(cudaFree(zBlockIndices)); DEBUG_PRINT(" - done\n"); return ret; } __global__ void hicoo_kevin3_kernel(DenseMatrix a, HicooTensor b, DenseMatrix d, DenseMatrix c) { int bi = blockIdx.x; // check that we are in the first block of this given blockZ (will often fail) // effecient if depth is very sparse; ineffecient otherwise HicooBlock ba = b.access_block(bi); if(bi > 0 && ba.blockZ == b.access_block(bi-1).blockZ) { // this makes a bit more sense when demorganized: // continue only if bi==0, or if blockZ different from previous return; } int blockZ = ba.blockZ; while(ba.blockZ == blockZ && bi < b.numBlocks) { // go through each block in this blockZ HicooBlock bb = b.access_block(bi+1); unsigned int bx = ba.blockX * b.blockWidth; unsigned int by = ba.blockY * b.blockHeight; unsigned int bz = ba.blockZ * b.blockDepth; // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); for(int e = ba.blockAddress; e < bb.blockAddress; e++) { HicooPoint p = b.access_point(e); for(int j = threadIdx.x; j < a.width; j+=32) { // because each cuda block is only accessing one blockZ, we don't have to atomicAdd() float val = p.value * d.access(p.x+bx,j) * c.access(p.y+by,j); a.access(p.z+bz, j) += val; //atomicAdd(&a.access(p.z+bz, j), val); } } ba = bb; bi++; } } DenseMatrixManager HicooTensor::mttkrp_kevin3(DenseMatrixManager D, DenseMatrixManager C) { // kevin2 but skip the LUT by pushing the essence of it into the 
kernel (still no atomicAdd) // effecient if depth is very sparse; ineffecient otherwise DEBUG_PRINT("HT: mttkrp kevin3\n"); DEBUG_PRINT(" - asserts, initialization\n"); DenseMatrixManager ret; DenseMatrix& a = ret; DenseMatrix& c = C; DenseMatrix& d = D; assert(points_d != nullptr); assert(blocks_d != nullptr); assert(c.values_d != nullptr); assert(d.values_d != nullptr); assert(sorting == ZYX); // A(i,j) = B(i,k,l) * D(l,j) * C(k,j); int I = this->depth, J = d.width, K = this->height, L = this->width; DEBUG_PRINT(" - I = %d, J = %d, K = %d, L = %d\n", I, J, K, L); assert(d.height == L); assert(c.height == K); assert(c.width == J); DEBUG_PRINT(" - malloc output matrix\n"); a.setSize_d(I, J); DEBUG_PRINT(" - do compute on gpu\n"); hicoo_kevin3_kernel<<<numBlocks, 32>>>(a, *this, d, c); DEBUG_PRINT(" - done\n"); return ret; }
a8c04fc6ebf47fc51e4c018915b1b04b0e8d4ed3.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, false, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
a8c04fc6ebf47fc51e4c018915b1b04b0e8d4ed3.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, false, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
cc10995cbd7f6ab6d4db3c34396ad09d415b9313.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < gates.size(2)){ input_gate[n][c] = sigmoid(gates[n][0][c]); output_gate[n][c] = sigmoid(gates[n][1][c]); candidate_cell[n][c] = elu(gates[n][2][c]); new_cell[n][c] = old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c]; new_h[n][c] = 
tanh(new_cell[n][c]) * output_gate[n][c]; } } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < d_gates.size(2)){ const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c]; const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c]; const auto d_new_cell = d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c]; d_old_cell[n][c] = d_new_cell; const auto d_candidate_cell = input_gate[n][c] * d_new_cell; const auto d_input_gate = candidate_cell[n][c] * d_new_cell; d_gates[n][0][c] = d_input_gate * d_sigmoid(gate_weights[n][0][c]); d_gates[n][1][c] = d_output_gate * d_sigmoid(gate_weights[n][1][c]); d_gates[n][2][c] = d_candidate_cell * d_elu(gate_weights[n][2][c]); } } } // namespace std::vector<torch::Tensor> lltm_cuda_forward( torch::Tensor input, torch::Tensor weights, torch::Tensor bias, torch::Tensor old_h, torch::Tensor old_cell) { auto X = torch::cat({old_h, input}, /*dim=*/1); auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto 
gates = gate_weights.reshape({batch_size, 3, state_size}); auto new_h = torch::zeros_like(old_cell); auto new_cell = torch::zeros_like(old_cell); auto input_gate = torch::zeros_like(old_cell); auto output_gate = torch::zeros_like(old_cell); auto candidate_cell = torch::zeros_like(old_cell); const int threads = 128; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>()); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<torch::Tensor> lltm_cuda_backward( torch::Tensor grad_h, torch::Tensor grad_cell, torch::Tensor new_cell, torch::Tensor input_gate, torch::Tensor output_gate, torch::Tensor candidate_cell, torch::Tensor X, torch::Tensor gates, torch::Tensor weights) { auto d_old_cell = torch::zeros_like(new_cell); auto d_gates = torch::zeros_like(gates); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), 
grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>()); })); auto d_gate_weights = d_gates.flatten(1, 2); auto d_weights = d_gate_weights.t().mm(X); auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gate_weights.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
cc10995cbd7f6ab6d4db3c34396ad09d415b9313.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < gates.size(2)){ input_gate[n][c] = sigmoid(gates[n][0][c]); output_gate[n][c] = sigmoid(gates[n][1][c]); candidate_cell[n][c] = elu(gates[n][2][c]); new_cell[n][c] = old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c]; new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c]; } } template <typename scalar_t> 
__global__ void lltm_cuda_backward_kernel( torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < d_gates.size(2)){ const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c]; const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c]; const auto d_new_cell = d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c]; d_old_cell[n][c] = d_new_cell; const auto d_candidate_cell = input_gate[n][c] * d_new_cell; const auto d_input_gate = candidate_cell[n][c] * d_new_cell; d_gates[n][0][c] = d_input_gate * d_sigmoid(gate_weights[n][0][c]); d_gates[n][1][c] = d_output_gate * d_sigmoid(gate_weights[n][1][c]); d_gates[n][2][c] = d_candidate_cell * d_elu(gate_weights[n][2][c]); } } } // namespace std::vector<torch::Tensor> lltm_cuda_forward( torch::Tensor input, torch::Tensor weights, torch::Tensor bias, torch::Tensor old_h, torch::Tensor old_cell) { auto X = torch::cat({old_h, input}, /*dim=*/1); auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto gates = gate_weights.reshape({batch_size, 3, state_size}); auto new_h = 
torch::zeros_like(old_cell); auto new_cell = torch::zeros_like(old_cell); auto input_gate = torch::zeros_like(old_cell); auto output_gate = torch::zeros_like(old_cell); auto candidate_cell = torch::zeros_like(old_cell); const int threads = 128; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>()); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<torch::Tensor> lltm_cuda_backward( torch::Tensor grad_h, torch::Tensor grad_cell, torch::Tensor new_cell, torch::Tensor input_gate, torch::Tensor output_gate, torch::Tensor candidate_cell, torch::Tensor X, torch::Tensor gates, torch::Tensor weights) { auto d_old_cell = torch::zeros_like(new_cell); auto d_gates = torch::zeros_like(gates); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), 
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>()); })); auto d_gate_weights = d_gates.flatten(1, 2); auto d_weights = d_gate_weights.t().mm(X); auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gate_weights.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
75e84b44fc897073560a8658a7c7c268f9fbe428.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************** * * Kernel code for GPUMCML * ========================================================================= * Featured Optimizations: * 1) Shared memory cache for high fluence region * 2) Reduced divergence * 3) Optimized atomicAdd * ****************************************************************************/ /* * This file is part of GPUMCML. * * GPUMCML is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GPUMCML is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GPUMCML. If not, see <http://www.gnu.org/licenses/>. 
*/ #ifndef _CUDAMCML_KERNEL_CU_ #define _CUDAMCML_KERNEL_CU_ #include "cudamcml_kernel.h" #include "cudamcml_rng.cu" ////////////////////////////////////////////////////////////////////////////// // Initialize photon position (x, y, z), direction (ux, uy, uz), weight (w), // step size remainder (sleft), and current layer (layer) // Note: Infinitely narrow beam (pointing in the +z direction = downwards) ////////////////////////////////////////////////////////////////////////////// __device__ void LaunchPhoton(FLOAT *x, FLOAT *y, FLOAT *z, FLOAT *ux, FLOAT *uy, FLOAT *uz, FLOAT *w, FLOAT *sleft, UINT32 *layer) { *x = *y = *z = MCML_FP_ZERO; *ux = *uy = MCML_FP_ZERO; *uz = FP_ONE; *w = d_simparam.init_photon_w; *sleft = MCML_FP_ZERO; *layer = 1; } ////////////////////////////////////////////////////////////////////////////// // Compute the step size for a photon packet when it is in tissue // If sleft is 0, calculate new step size: -log(rnd)/(mua+mus). // Otherwise, pick up the leftover in sleft. ////////////////////////////////////////////////////////////////////////////// __device__ void ComputeStepSize(UINT32 layer, FLOAT *s_ptr, FLOAT *sleft_ptr, UINT64 *rnd_x, UINT32 *rnd_a) { // Make a new step if no leftover. FLOAT s = *sleft_ptr; if (s == MCML_FP_ZERO) { FLOAT rand = rand_MWC_oc(rnd_x, rnd_a); s = -__logf(rand); } *s_ptr = s * d_layerspecs[layer].rmuas; *sleft_ptr = MCML_FP_ZERO; } ////////////////////////////////////////////////////////////////////////////// // Check if the step size calculated above will cause the photon to hit the // boundary between 2 layers. // Return 1 for a hit, 0 otherwise. // If the projected step hits the boundary, the photon steps to the boundary // and the remainder of the step size is stored in sleft for the next iteration ////////////////////////////////////////////////////////////////////////////// __device__ int HitBoundary(UINT32 layer, FLOAT z, FLOAT uz, FLOAT *s_ptr, FLOAT *sleft_ptr) { /* step size to boundary. 
*/ FLOAT dl_b; /* Distance to the boundary. */ FLOAT z_bound = (uz > MCML_FP_ZERO) ? d_layerspecs[layer].z1 : d_layerspecs[layer].z0; dl_b = __fdividef(z_bound - z, uz); // dl_b > 0 FLOAT s = *s_ptr; UINT32 hit_boundary = (uz != MCML_FP_ZERO) && (s > dl_b); if (hit_boundary) { // No need to multiply by (mua + mus), as it is later // divided by (mua + mus) anyways (in the original version). *sleft_ptr = (s - dl_b) * d_layerspecs[layer].muas; *s_ptr = dl_b; } return hit_boundary; } ////////////////////////////////////////////////////////////////////////////// // Move the photon by step size (s) along direction (ux,uy,uz) ////////////////////////////////////////////////////////////////////////////// __device__ void Hop(FLOAT s, FLOAT ux, FLOAT uy, FLOAT uz, FLOAT *x, FLOAT *y, FLOAT *z) { *x += s * ux; *y += s * uy; *z += s * uz; } ////////////////////////////////////////////////////////////////////////////// // UltraFast version (featuring reduced divergence compared to CPU-MCML) // If a photon hits a boundary, determine whether the photon is transmitted // into the next layer or reflected back by computing the internal reflectance ////////////////////////////////////////////////////////////////////////////// __device__ void FastReflectTransmit(FLOAT x, FLOAT y, SimState *d_state_ptr, FLOAT *ux, FLOAT *uy, FLOAT *uz, UINT32 *layer, FLOAT* w, UINT64 *rnd_x, UINT32 *rnd_a) { /* Collect all info that depend on the sign of "uz". */ FLOAT cos_crit; UINT32 new_layer; if (*uz > MCML_FP_ZERO) { cos_crit = d_layerspecs[(*layer)].cos_crit1; new_layer = (*layer)+1; } else { cos_crit = d_layerspecs[(*layer)].cos_crit0; new_layer = (*layer)-1; } // cosine of the incident angle (0 to 90 deg) FLOAT ca1 = fabsf(*uz); // The default move is to reflect. *uz = -(*uz); // Moving this check down to "RFresnel = MCML_FP_ZERO" slows down the // application, possibly because every thread is forced to do // too much. if (ca1 > cos_crit) { /* Compute the Fresnel reflectance. 
*/ // incident and transmit refractive index FLOAT ni = d_layerspecs[(*layer)].n; FLOAT nt = d_layerspecs[new_layer].n; FLOAT ni_nt = __fdividef(ni, nt); // reused later FLOAT sa1 = sqrtf(FP_ONE-ca1*ca1); FLOAT sa2 = fminf(ni_nt * sa1, FP_ONE); if (ca1 > COSZERO) sa2 = sa1; FLOAT uz1 = sqrtf(FP_ONE-sa2*sa2); // uz1 = ca2 FLOAT ca1ca2 = ca1 * uz1; FLOAT sa1sa2 = sa1 * sa2; FLOAT sa1ca2 = sa1 * uz1; FLOAT ca1sa2 = ca1 * sa2; FLOAT cam = ca1ca2 + sa1sa2; /* c- = cc + ss. */ FLOAT sap = sa1ca2 + ca1sa2; /* s+ = sc + cs. */ FLOAT sam = sa1ca2 - ca1sa2; /* s- = sc - cs. */ FLOAT rFresnel = __fdividef(sam, sap*cam); rFresnel *= rFresnel; rFresnel *= (ca1ca2*ca1ca2 + sa1sa2*sa1sa2); // Hope "uz1" is very close to "ca1". if (ca1 > COSZERO) rFresnel = MCML_FP_ZERO; // In this case, we do not care if "uz1" is exactly 0. if (ca1 < COSNINETYDEG || sa2 == FP_ONE) rFresnel = FP_ONE; FLOAT rand = rand_MWC_co(rnd_x, rnd_a); if (rFresnel < rand) { // The move is to transmit. *layer = new_layer; // Let's do these even if the photon is dead. *ux *= ni_nt; *uy *= ni_nt; *uz = -copysignf(uz1, *uz); if (*layer == 0 || *layer > d_simparam.num_layers) { // transmitted FLOAT uz2 = *uz; UINT64 *ra_arr = d_state_ptr->Tt_ra; if (*layer == 0) { // diffuse reflectance uz2 = -uz2; ra_arr = d_state_ptr->Rd_ra; } UINT32 ia = acosf(uz2) * FP_TWO * RPI * d_simparam.na; UINT32 ir = __fdividef(sqrtf(x*x+y*y), d_simparam.dr); if (ir >= d_simparam.nr) ir = d_simparam.nr - 1; atomicAdd(&ra_arr[ia * d_simparam.nr + ir], (UINT32)(*w * WEIGHT_SCALE)); // Kill the photon. *w = MCML_FP_ZERO; } } } } ////////////////////////////////////////////////////////////////////////////// // Computing the scattering angle and new direction by // sampling the polar deflection angle theta and the // azimuthal angle psi. 
////////////////////////////////////////////////////////////////////////////// __device__ void Spin(FLOAT g, FLOAT *ux, FLOAT *uy, FLOAT *uz, UINT64 *rnd_x, UINT32 *rnd_a) { FLOAT cost, sint; // cosine and sine of the polar deflection angle theta FLOAT cosp, sinp; // cosine and sine of the azimuthal angle psi FLOAT psi; FLOAT SIGN; FLOAT temp; FLOAT last_ux, last_uy, last_uz; FLOAT rand; /*********************************************************** * >>>>>>> SpinTheta * Choose (sample) a new theta angle for photon propagation * according to the anisotropy. * * If anisotropy g is 0, then * cos(theta) = 2*rand-1. * otherwise * sample according to the Henyey-Greenstein function. * * Returns the cosine of the polar deflection angle theta. ****/ rand = rand_MWC_co(rnd_x, rnd_a); cost = FP_TWO * rand - FP_ONE; if (g != MCML_FP_ZERO) { temp = __fdividef((FP_ONE - g * g), FP_ONE + g*cost); cost = __fdividef(FP_ONE + g * g - temp*temp, FP_TWO * g); cost = fmaxf(cost, -FP_ONE); cost = fminf(cost, FP_ONE); } sint = sqrtf(FP_ONE - cost * cost); /* spin psi 0-2pi. */ rand = rand_MWC_co(rnd_x, rnd_a); psi = FP_TWO * PI_const * rand; __sincosf(psi, &sinp, &cosp); FLOAT stcp = sint * cosp; FLOAT stsp = sint * sinp; last_ux = *ux; last_uy = *uy; last_uz = *uz; if (fabsf(last_uz) > COSZERO) /* normal incident. */ { *ux = stcp; *uy = stsp; SIGN = ((last_uz) >= MCML_FP_ZERO ? FP_ONE : -FP_ONE); *uz = cost * SIGN; } else /* regular incident. 
*/ { temp = rsqrtf(FP_ONE - last_uz * last_uz); *ux = (stcp * last_ux * last_uz - stsp * last_uy) * temp + last_ux * cost; *uy = (stcp * last_uy * last_uz + stsp * last_ux) * temp + last_uy * cost; *uz = __fdividef(-stcp, temp) + last_uz * cost; } } ////////////////////////////////////////////////////////////////////////////// // Initialize thread states (tstates), created to allow a large // simulation to be broken up into batches // (avoiding display driver time-out errors) ////////////////////////////////////////////////////////////////////////////// __global__ void InitThreadState(GPUThreadStates tstates) { FLOAT photon_x, photon_y, photon_z; FLOAT photon_ux, photon_uy, photon_uz; FLOAT photon_w, photon_sleft; UINT32 photon_layer; // Initialize the photon and copy into photon_<parameter x> LaunchPhoton(&photon_x, &photon_y, &photon_z, &photon_ux, &photon_uy, &photon_uz, &photon_w, &photon_sleft, &photon_layer); // This is the unique ID for each thread (or thread ID = tid) UINT32 tid = blockIdx.x * NUM_THREADS_PER_BLOCK + threadIdx.x; tstates.photon_x[tid] = photon_x; tstates.photon_y[tid] = photon_y; tstates.photon_z[tid] = photon_z; tstates.photon_ux[tid] = photon_ux; tstates.photon_uy[tid] = photon_uy; tstates.photon_uz[tid] = photon_uz; tstates.photon_w[tid] = photon_w; tstates.photon_sleft[tid] = photon_sleft; tstates.photon_layer[tid] = photon_layer; tstates.is_active[tid] = 1; } ////////////////////////////////////////////////////////////////////////////// // Save thread states (tstates), by copying the current photon // data from registers into global memory ////////////////////////////////////////////////////////////////////////////// __device__ void SaveThreadState(SimState *d_state, GPUThreadStates *tstates, FLOAT photon_x, FLOAT photon_y, FLOAT photon_z, FLOAT photon_ux, FLOAT photon_uy, FLOAT photon_uz, FLOAT photon_w, FLOAT photon_sleft, UINT32 photon_layer, UINT64 rnd_x, UINT32 rnd_a, UINT32 is_active) { UINT32 tid = blockIdx.x * 
NUM_THREADS_PER_BLOCK + threadIdx.x; d_state->x[tid] = rnd_x; d_state->a[tid] = rnd_a; tstates->photon_x[tid] = photon_x; tstates->photon_y[tid] = photon_y; tstates->photon_z[tid] = photon_z; tstates->photon_ux[tid] = photon_ux; tstates->photon_uy[tid] = photon_uy; tstates->photon_uz[tid] = photon_uz; tstates->photon_w[tid] = photon_w; tstates->photon_sleft[tid] = photon_sleft; tstates->photon_layer[tid] = photon_layer; tstates->is_active[tid] = is_active; } ////////////////////////////////////////////////////////////////////////////// // Restore thread states (tstates), by copying the latest photon // data from global memory back into the registers ////////////////////////////////////////////////////////////////////////////// __device__ void RestoreThreadState(SimState *d_state, GPUThreadStates *tstates, FLOAT *photon_x, FLOAT *photon_y, FLOAT *photon_z, FLOAT *photon_ux, FLOAT *photon_uy, FLOAT *photon_uz, FLOAT *photon_w, FLOAT *photon_sleft, UINT32 *photon_layer, UINT64 *rnd_x, UINT32 *rnd_a, UINT32 *is_active) { UINT32 tid = blockIdx.x * NUM_THREADS_PER_BLOCK + threadIdx.x; *rnd_x = d_state->x[tid]; *rnd_a = d_state->a[tid]; *photon_x = tstates->photon_x[tid]; *photon_y = tstates->photon_y[tid]; *photon_z = tstates->photon_z[tid]; *photon_ux = tstates->photon_ux[tid]; *photon_uy = tstates->photon_uy[tid]; *photon_uz = tstates->photon_uz[tid]; *photon_w = tstates->photon_w[tid]; *photon_sleft = tstates->photon_sleft[tid]; *photon_layer = tstates->photon_layer[tid]; *is_active = tstates->is_active[tid]; } ////////////////////////////////////////////////////////////////////////////// // Flush the element at offset <shared_addr> of A_rz in shared memory (s_A_rz) // to the global memory (g_A_rz). <s_A_rz> is of dimension MAX_IR x MAX_IZ. 
////////////////////////////////////////////////////////////////////////////// __device__ void Flush_Arz(UINT64 *g_A_rz, UINT64 *s_A_rz, UINT32 shared_addr) { UINT32 ir = shared_addr / MAX_IZ; UINT32 iz = shared_addr - ir * MAX_IZ; UINT32 global_addr = ir * d_simparam.nz + iz; atomicAdd(&g_A_rz[global_addr], s_A_rz[shared_addr]); } ////////////////////////////////////////////////////////////////////////////// // AtomicAdd to Shared Mem for Unsigned Long Long (ULL) data type // Note: Only Fermi architecture supports 64-bit atomicAdd to // both shared memory and global memory ////////////////////////////////////////////////////////////////////////////// __device__ void AtomicAddULL_Shared(UINT64* address, UINT32 add) { #ifdef FERMI atomicAdd(address, (UINT64)add); #else if (atomicAdd((UINT32*)address,add) +add < add) { atomicAdd(((UINT32*)address)+1, 1U); } #endif } ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Main Kernel for MCML (Calls the above inline device functions) ////////////////////////////////////////////////////////////////////////////// template <int ignoreAdetection> __global__ void MCMLKernel(SimState d_state, GPUThreadStates tstates) { // photon structure stored in registers FLOAT photon_x, photon_y ,photon_z; FLOAT photon_ux, photon_uy, photon_uz; FLOAT photon_w, photon_sleft; UINT32 photon_layer; // random number seeds UINT64 rnd_x; UINT32 rnd_a; // Flag to indicate if this thread is active UINT32 is_active; // Restore the thread state from global memory. 
RestoreThreadState(&d_state, &tstates, &photon_x, &photon_y, &photon_z, &photon_ux, &photon_uy, &photon_uz, &photon_w, &photon_sleft, &photon_layer, &rnd_x, &rnd_a, &is_active); ////////////////////////////////////////////////////////////////////////// // Coalesce consecutive weight drops to the same address. UINT32 last_w = 0; UINT32 last_ir = 0, last_iz = 0, last_addr = 0; ////////////////////////////////////////////////////////////////////////// #ifndef USE_TRUE_CACHE // Cache the frequently acessed region of A_rz in the shared memory. __shared__ UINT64 A_rz_shared[MAX_IR*MAX_IZ]; if (ignoreAdetection == 0) { // Clear the cache. for (int i = threadIdx.x; i < MAX_IR*MAX_IZ; i += NUM_THREADS_PER_BLOCK) A_rz_shared[i] = 0; __syncthreads(); } #endif ////////////////////////////////////////////////////////////////////////// // Get the copy of A_rz (in the global memory) this thread writes to. UINT64 *g_A_rz = d_state.A_rz; // + (blockIdx.x % N_A_RZ_COPIES) * (d_simparam.nz * d_simparam.nr); ////////////////////////////////////////////////////////////////////////// for (int iIndex = 0; iIndex < NUM_STEPS; ++iIndex) { // Only process photon if the thread is active. 
if (is_active) { FLOAT photon_s; // current step size //>>>>>>>>> StepSizeInTissue() in MCML ComputeStepSize(photon_layer, &photon_s, &photon_sleft, &rnd_x, &rnd_a); //>>>>>>>>> HitBoundary() in MCML UINT32 photon_hit = HitBoundary(photon_layer, photon_z, photon_uz, &photon_s, &photon_sleft); Hop(photon_s, photon_ux, photon_uy, photon_uz, &photon_x, &photon_y, &photon_z); if (photon_hit) { FastReflectTransmit(photon_x, photon_y, &d_state, &photon_ux, &photon_uy, &photon_uz, &photon_layer, &photon_w, &rnd_x, &rnd_a); } else { //>>>>>>>>> Drop() in MCML FLOAT dwa = photon_w * d_layerspecs[photon_layer].mua_muas; photon_w -= dwa; // DAVID if (ignoreAdetection == 0) { // automatic __float2uint_rz UINT32 iz = __fdividef(photon_z, d_simparam.dz); // automatic __float2uint_rz UINT32 ir = __fdividef( sqrtf(photon_x * photon_x + photon_y * photon_y), d_simparam.dr); // Only record if photon is not at the edge!! // This will be ignored anyways. if (iz < d_simparam.nz && ir < d_simparam.nr) { UINT32 addr = ir * d_simparam.nz + iz; if (addr != last_addr) { #ifndef USE_TRUE_CACHE // Commit the weight drop to memory. if (last_ir < MAX_IR && last_iz < MAX_IZ) { // Write it to the shared memory. last_addr = last_ir * MAX_IZ + last_iz; AtomicAddULL_Shared(&A_rz_shared[last_addr], last_w); } else #endif { // Write it to the global memory directly. atomicAdd(&g_A_rz[last_addr], (UINT64)last_w); } last_ir = ir; last_iz = iz; last_addr = addr; // Reset the last weight. last_w = 0; } // Accumulate to the last weight. last_w += (UINT32)(dwa * WEIGHT_SCALE); } } //>>>>>>>>> end of Drop() Spin(d_layerspecs[photon_layer].g, &photon_ux, &photon_uy, &photon_uz, &rnd_x, &rnd_a); } /*********************************************************** * >>>>>>>>> Roulette() * If the photon weight is small, the photon packet tries * to survive a roulette. 
****/ if (photon_w < WEIGHT) { FLOAT rand = rand_MWC_co(&rnd_x, &rnd_a); if (photon_w != MCML_FP_ZERO && rand < CHANCE) // This photon survives the roulette. { photon_w *= (FP_ONE / CHANCE); } // This photon is terminated. else if (atomicSub(d_state.n_photons_left, 1) > NUM_THREADS) { // Launch a new photon. LaunchPhoton(&photon_x, &photon_y, &photon_z, &photon_ux, &photon_uy, &photon_uz, &photon_w, &photon_sleft, &photon_layer); } else { // No need to process any more photons. is_active = 0; } } } ////////////////////////////////////////////////////////////////////// } // end of the main loop __syncthreads(); ////////////////////////////////////////////////////////////////////////// if (ignoreAdetection == 0) { // Commit the last weight drop to the global memory directly. // NOTE: last_w == 0 if inactive. if (last_w > 0) { UINT32 global_addr = last_ir * d_simparam.nz + last_iz; atomicAdd(&g_A_rz[global_addr], last_w); } #ifndef USE_TRUE_CACHE // Flush A_rz_shared to the global memory. for (int i = threadIdx.x; i < MAX_IR*MAX_IZ; i += NUM_THREADS_PER_BLOCK) { Flush_Arz(g_A_rz, A_rz_shared, i); } #endif } ////////////////////////////////////////////////////////////////////////// // Save the thread state to the global memory. 
SaveThreadState(&d_state, &tstates, photon_x, photon_y, photon_z, photon_ux, photon_uy, photon_uz, photon_w, photon_sleft, photon_layer, rnd_x, rnd_a, is_active); } ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// //__global__ void sum_A_rz(UINT64 *g_A_rz) //{ // UINT64 sum; // // int n_elems = d_simparam.nz * d_simparam.nr; // int base_ofst, ofst; // // for (base_ofst = blockIdx.x * blockDim.x + threadIdx.x; // base_ofst < n_elems; base_ofst += blockDim.x * gridDim.x) // { // sum = 0; // ofst = base_ofst; //#pragma unroll // for (int i = 0; i < N_A_RZ_COPIES; ++i) // { // sum += g_A_rz[ofst]; // ofst += n_elems; // } // g_A_rz[base_ofst] = sum; // } //} #endif // _CUDAMCML_KERNEL_CU_
75e84b44fc897073560a8658a7c7c268f9fbe428.cu
/***************************************************************************** * * Kernel code for GPUMCML * ========================================================================= * Featured Optimizations: * 1) Shared memory cache for high fluence region * 2) Reduced divergence * 3) Optimized atomicAdd * ****************************************************************************/ /* * This file is part of GPUMCML. * * GPUMCML is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GPUMCML is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GPUMCML. If not, see <http://www.gnu.org/licenses/>. 
*/ #ifndef _CUDAMCML_KERNEL_CU_ #define _CUDAMCML_KERNEL_CU_ #include "cudamcml_kernel.h" #include "cudamcml_rng.cu" ////////////////////////////////////////////////////////////////////////////// // Initialize photon position (x, y, z), direction (ux, uy, uz), weight (w), // step size remainder (sleft), and current layer (layer) // Note: Infinitely narrow beam (pointing in the +z direction = downwards) ////////////////////////////////////////////////////////////////////////////// __device__ void LaunchPhoton(FLOAT *x, FLOAT *y, FLOAT *z, FLOAT *ux, FLOAT *uy, FLOAT *uz, FLOAT *w, FLOAT *sleft, UINT32 *layer) { *x = *y = *z = MCML_FP_ZERO; *ux = *uy = MCML_FP_ZERO; *uz = FP_ONE; *w = d_simparam.init_photon_w; *sleft = MCML_FP_ZERO; *layer = 1; } ////////////////////////////////////////////////////////////////////////////// // Compute the step size for a photon packet when it is in tissue // If sleft is 0, calculate new step size: -log(rnd)/(mua+mus). // Otherwise, pick up the leftover in sleft. ////////////////////////////////////////////////////////////////////////////// __device__ void ComputeStepSize(UINT32 layer, FLOAT *s_ptr, FLOAT *sleft_ptr, UINT64 *rnd_x, UINT32 *rnd_a) { // Make a new step if no leftover. FLOAT s = *sleft_ptr; if (s == MCML_FP_ZERO) { FLOAT rand = rand_MWC_oc(rnd_x, rnd_a); s = -__logf(rand); } *s_ptr = s * d_layerspecs[layer].rmuas; *sleft_ptr = MCML_FP_ZERO; } ////////////////////////////////////////////////////////////////////////////// // Check if the step size calculated above will cause the photon to hit the // boundary between 2 layers. // Return 1 for a hit, 0 otherwise. // If the projected step hits the boundary, the photon steps to the boundary // and the remainder of the step size is stored in sleft for the next iteration ////////////////////////////////////////////////////////////////////////////// __device__ int HitBoundary(UINT32 layer, FLOAT z, FLOAT uz, FLOAT *s_ptr, FLOAT *sleft_ptr) { /* step size to boundary. 
*/ FLOAT dl_b; /* Distance to the boundary. */ FLOAT z_bound = (uz > MCML_FP_ZERO) ? d_layerspecs[layer].z1 : d_layerspecs[layer].z0; dl_b = __fdividef(z_bound - z, uz); // dl_b > 0 FLOAT s = *s_ptr; UINT32 hit_boundary = (uz != MCML_FP_ZERO) && (s > dl_b); if (hit_boundary) { // No need to multiply by (mua + mus), as it is later // divided by (mua + mus) anyways (in the original version). *sleft_ptr = (s - dl_b) * d_layerspecs[layer].muas; *s_ptr = dl_b; } return hit_boundary; } ////////////////////////////////////////////////////////////////////////////// // Move the photon by step size (s) along direction (ux,uy,uz) ////////////////////////////////////////////////////////////////////////////// __device__ void Hop(FLOAT s, FLOAT ux, FLOAT uy, FLOAT uz, FLOAT *x, FLOAT *y, FLOAT *z) { *x += s * ux; *y += s * uy; *z += s * uz; } ////////////////////////////////////////////////////////////////////////////// // UltraFast version (featuring reduced divergence compared to CPU-MCML) // If a photon hits a boundary, determine whether the photon is transmitted // into the next layer or reflected back by computing the internal reflectance ////////////////////////////////////////////////////////////////////////////// __device__ void FastReflectTransmit(FLOAT x, FLOAT y, SimState *d_state_ptr, FLOAT *ux, FLOAT *uy, FLOAT *uz, UINT32 *layer, FLOAT* w, UINT64 *rnd_x, UINT32 *rnd_a) { /* Collect all info that depend on the sign of "uz". */ FLOAT cos_crit; UINT32 new_layer; if (*uz > MCML_FP_ZERO) { cos_crit = d_layerspecs[(*layer)].cos_crit1; new_layer = (*layer)+1; } else { cos_crit = d_layerspecs[(*layer)].cos_crit0; new_layer = (*layer)-1; } // cosine of the incident angle (0 to 90 deg) FLOAT ca1 = fabsf(*uz); // The default move is to reflect. *uz = -(*uz); // Moving this check down to "RFresnel = MCML_FP_ZERO" slows down the // application, possibly because every thread is forced to do // too much. if (ca1 > cos_crit) { /* Compute the Fresnel reflectance. 
*/ // incident and transmit refractive index FLOAT ni = d_layerspecs[(*layer)].n; FLOAT nt = d_layerspecs[new_layer].n; FLOAT ni_nt = __fdividef(ni, nt); // reused later FLOAT sa1 = sqrtf(FP_ONE-ca1*ca1); FLOAT sa2 = fminf(ni_nt * sa1, FP_ONE); if (ca1 > COSZERO) sa2 = sa1; FLOAT uz1 = sqrtf(FP_ONE-sa2*sa2); // uz1 = ca2 FLOAT ca1ca2 = ca1 * uz1; FLOAT sa1sa2 = sa1 * sa2; FLOAT sa1ca2 = sa1 * uz1; FLOAT ca1sa2 = ca1 * sa2; FLOAT cam = ca1ca2 + sa1sa2; /* c- = cc + ss. */ FLOAT sap = sa1ca2 + ca1sa2; /* s+ = sc + cs. */ FLOAT sam = sa1ca2 - ca1sa2; /* s- = sc - cs. */ FLOAT rFresnel = __fdividef(sam, sap*cam); rFresnel *= rFresnel; rFresnel *= (ca1ca2*ca1ca2 + sa1sa2*sa1sa2); // Hope "uz1" is very close to "ca1". if (ca1 > COSZERO) rFresnel = MCML_FP_ZERO; // In this case, we do not care if "uz1" is exactly 0. if (ca1 < COSNINETYDEG || sa2 == FP_ONE) rFresnel = FP_ONE; FLOAT rand = rand_MWC_co(rnd_x, rnd_a); if (rFresnel < rand) { // The move is to transmit. *layer = new_layer; // Let's do these even if the photon is dead. *ux *= ni_nt; *uy *= ni_nt; *uz = -copysignf(uz1, *uz); if (*layer == 0 || *layer > d_simparam.num_layers) { // transmitted FLOAT uz2 = *uz; UINT64 *ra_arr = d_state_ptr->Tt_ra; if (*layer == 0) { // diffuse reflectance uz2 = -uz2; ra_arr = d_state_ptr->Rd_ra; } UINT32 ia = acosf(uz2) * FP_TWO * RPI * d_simparam.na; UINT32 ir = __fdividef(sqrtf(x*x+y*y), d_simparam.dr); if (ir >= d_simparam.nr) ir = d_simparam.nr - 1; atomicAdd(&ra_arr[ia * d_simparam.nr + ir], (UINT32)(*w * WEIGHT_SCALE)); // Kill the photon. *w = MCML_FP_ZERO; } } } } ////////////////////////////////////////////////////////////////////////////// // Computing the scattering angle and new direction by // sampling the polar deflection angle theta and the // azimuthal angle psi. 
////////////////////////////////////////////////////////////////////////////// __device__ void Spin(FLOAT g, FLOAT *ux, FLOAT *uy, FLOAT *uz, UINT64 *rnd_x, UINT32 *rnd_a) { FLOAT cost, sint; // cosine and sine of the polar deflection angle theta FLOAT cosp, sinp; // cosine and sine of the azimuthal angle psi FLOAT psi; FLOAT SIGN; FLOAT temp; FLOAT last_ux, last_uy, last_uz; FLOAT rand; /*********************************************************** * >>>>>>> SpinTheta * Choose (sample) a new theta angle for photon propagation * according to the anisotropy. * * If anisotropy g is 0, then * cos(theta) = 2*rand-1. * otherwise * sample according to the Henyey-Greenstein function. * * Returns the cosine of the polar deflection angle theta. ****/ rand = rand_MWC_co(rnd_x, rnd_a); cost = FP_TWO * rand - FP_ONE; if (g != MCML_FP_ZERO) { temp = __fdividef((FP_ONE - g * g), FP_ONE + g*cost); cost = __fdividef(FP_ONE + g * g - temp*temp, FP_TWO * g); cost = fmaxf(cost, -FP_ONE); cost = fminf(cost, FP_ONE); } sint = sqrtf(FP_ONE - cost * cost); /* spin psi 0-2pi. */ rand = rand_MWC_co(rnd_x, rnd_a); psi = FP_TWO * PI_const * rand; __sincosf(psi, &sinp, &cosp); FLOAT stcp = sint * cosp; FLOAT stsp = sint * sinp; last_ux = *ux; last_uy = *uy; last_uz = *uz; if (fabsf(last_uz) > COSZERO) /* normal incident. */ { *ux = stcp; *uy = stsp; SIGN = ((last_uz) >= MCML_FP_ZERO ? FP_ONE : -FP_ONE); *uz = cost * SIGN; } else /* regular incident. 
*/ { temp = rsqrtf(FP_ONE - last_uz * last_uz); *ux = (stcp * last_ux * last_uz - stsp * last_uy) * temp + last_ux * cost; *uy = (stcp * last_uy * last_uz + stsp * last_ux) * temp + last_uy * cost; *uz = __fdividef(-stcp, temp) + last_uz * cost; } } ////////////////////////////////////////////////////////////////////////////// // Initialize thread states (tstates), created to allow a large // simulation to be broken up into batches // (avoiding display driver time-out errors) ////////////////////////////////////////////////////////////////////////////// __global__ void InitThreadState(GPUThreadStates tstates) { FLOAT photon_x, photon_y, photon_z; FLOAT photon_ux, photon_uy, photon_uz; FLOAT photon_w, photon_sleft; UINT32 photon_layer; // Initialize the photon and copy into photon_<parameter x> LaunchPhoton(&photon_x, &photon_y, &photon_z, &photon_ux, &photon_uy, &photon_uz, &photon_w, &photon_sleft, &photon_layer); // This is the unique ID for each thread (or thread ID = tid) UINT32 tid = blockIdx.x * NUM_THREADS_PER_BLOCK + threadIdx.x; tstates.photon_x[tid] = photon_x; tstates.photon_y[tid] = photon_y; tstates.photon_z[tid] = photon_z; tstates.photon_ux[tid] = photon_ux; tstates.photon_uy[tid] = photon_uy; tstates.photon_uz[tid] = photon_uz; tstates.photon_w[tid] = photon_w; tstates.photon_sleft[tid] = photon_sleft; tstates.photon_layer[tid] = photon_layer; tstates.is_active[tid] = 1; } ////////////////////////////////////////////////////////////////////////////// // Save thread states (tstates), by copying the current photon // data from registers into global memory ////////////////////////////////////////////////////////////////////////////// __device__ void SaveThreadState(SimState *d_state, GPUThreadStates *tstates, FLOAT photon_x, FLOAT photon_y, FLOAT photon_z, FLOAT photon_ux, FLOAT photon_uy, FLOAT photon_uz, FLOAT photon_w, FLOAT photon_sleft, UINT32 photon_layer, UINT64 rnd_x, UINT32 rnd_a, UINT32 is_active) { UINT32 tid = blockIdx.x * 
NUM_THREADS_PER_BLOCK + threadIdx.x; d_state->x[tid] = rnd_x; d_state->a[tid] = rnd_a; tstates->photon_x[tid] = photon_x; tstates->photon_y[tid] = photon_y; tstates->photon_z[tid] = photon_z; tstates->photon_ux[tid] = photon_ux; tstates->photon_uy[tid] = photon_uy; tstates->photon_uz[tid] = photon_uz; tstates->photon_w[tid] = photon_w; tstates->photon_sleft[tid] = photon_sleft; tstates->photon_layer[tid] = photon_layer; tstates->is_active[tid] = is_active; } ////////////////////////////////////////////////////////////////////////////// // Restore thread states (tstates), by copying the latest photon // data from global memory back into the registers ////////////////////////////////////////////////////////////////////////////// __device__ void RestoreThreadState(SimState *d_state, GPUThreadStates *tstates, FLOAT *photon_x, FLOAT *photon_y, FLOAT *photon_z, FLOAT *photon_ux, FLOAT *photon_uy, FLOAT *photon_uz, FLOAT *photon_w, FLOAT *photon_sleft, UINT32 *photon_layer, UINT64 *rnd_x, UINT32 *rnd_a, UINT32 *is_active) { UINT32 tid = blockIdx.x * NUM_THREADS_PER_BLOCK + threadIdx.x; *rnd_x = d_state->x[tid]; *rnd_a = d_state->a[tid]; *photon_x = tstates->photon_x[tid]; *photon_y = tstates->photon_y[tid]; *photon_z = tstates->photon_z[tid]; *photon_ux = tstates->photon_ux[tid]; *photon_uy = tstates->photon_uy[tid]; *photon_uz = tstates->photon_uz[tid]; *photon_w = tstates->photon_w[tid]; *photon_sleft = tstates->photon_sleft[tid]; *photon_layer = tstates->photon_layer[tid]; *is_active = tstates->is_active[tid]; } ////////////////////////////////////////////////////////////////////////////// // Flush the element at offset <shared_addr> of A_rz in shared memory (s_A_rz) // to the global memory (g_A_rz). <s_A_rz> is of dimension MAX_IR x MAX_IZ. 
////////////////////////////////////////////////////////////////////////////// __device__ void Flush_Arz(UINT64 *g_A_rz, UINT64 *s_A_rz, UINT32 shared_addr) { UINT32 ir = shared_addr / MAX_IZ; UINT32 iz = shared_addr - ir * MAX_IZ; UINT32 global_addr = ir * d_simparam.nz + iz; atomicAdd(&g_A_rz[global_addr], s_A_rz[shared_addr]); } ////////////////////////////////////////////////////////////////////////////// // AtomicAdd to Shared Mem for Unsigned Long Long (ULL) data type // Note: Only Fermi architecture supports 64-bit atomicAdd to // both shared memory and global memory ////////////////////////////////////////////////////////////////////////////// __device__ void AtomicAddULL_Shared(UINT64* address, UINT32 add) { #ifdef FERMI atomicAdd(address, (UINT64)add); #else if (atomicAdd((UINT32*)address,add) +add < add) { atomicAdd(((UINT32*)address)+1, 1U); } #endif } ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Main Kernel for MCML (Calls the above inline device functions) ////////////////////////////////////////////////////////////////////////////// template <int ignoreAdetection> __global__ void MCMLKernel(SimState d_state, GPUThreadStates tstates) { // photon structure stored in registers FLOAT photon_x, photon_y ,photon_z; FLOAT photon_ux, photon_uy, photon_uz; FLOAT photon_w, photon_sleft; UINT32 photon_layer; // random number seeds UINT64 rnd_x; UINT32 rnd_a; // Flag to indicate if this thread is active UINT32 is_active; // Restore the thread state from global memory. 
RestoreThreadState(&d_state, &tstates, &photon_x, &photon_y, &photon_z, &photon_ux, &photon_uy, &photon_uz, &photon_w, &photon_sleft, &photon_layer, &rnd_x, &rnd_a, &is_active); ////////////////////////////////////////////////////////////////////////// // Coalesce consecutive weight drops to the same address. UINT32 last_w = 0; UINT32 last_ir = 0, last_iz = 0, last_addr = 0; ////////////////////////////////////////////////////////////////////////// #ifndef USE_TRUE_CACHE // Cache the frequently acessed region of A_rz in the shared memory. __shared__ UINT64 A_rz_shared[MAX_IR*MAX_IZ]; if (ignoreAdetection == 0) { // Clear the cache. for (int i = threadIdx.x; i < MAX_IR*MAX_IZ; i += NUM_THREADS_PER_BLOCK) A_rz_shared[i] = 0; __syncthreads(); } #endif ////////////////////////////////////////////////////////////////////////// // Get the copy of A_rz (in the global memory) this thread writes to. UINT64 *g_A_rz = d_state.A_rz; // + (blockIdx.x % N_A_RZ_COPIES) * (d_simparam.nz * d_simparam.nr); ////////////////////////////////////////////////////////////////////////// for (int iIndex = 0; iIndex < NUM_STEPS; ++iIndex) { // Only process photon if the thread is active. 
if (is_active) { FLOAT photon_s; // current step size //>>>>>>>>> StepSizeInTissue() in MCML ComputeStepSize(photon_layer, &photon_s, &photon_sleft, &rnd_x, &rnd_a); //>>>>>>>>> HitBoundary() in MCML UINT32 photon_hit = HitBoundary(photon_layer, photon_z, photon_uz, &photon_s, &photon_sleft); Hop(photon_s, photon_ux, photon_uy, photon_uz, &photon_x, &photon_y, &photon_z); if (photon_hit) { FastReflectTransmit(photon_x, photon_y, &d_state, &photon_ux, &photon_uy, &photon_uz, &photon_layer, &photon_w, &rnd_x, &rnd_a); } else { //>>>>>>>>> Drop() in MCML FLOAT dwa = photon_w * d_layerspecs[photon_layer].mua_muas; photon_w -= dwa; // DAVID if (ignoreAdetection == 0) { // automatic __float2uint_rz UINT32 iz = __fdividef(photon_z, d_simparam.dz); // automatic __float2uint_rz UINT32 ir = __fdividef( sqrtf(photon_x * photon_x + photon_y * photon_y), d_simparam.dr); // Only record if photon is not at the edge!! // This will be ignored anyways. if (iz < d_simparam.nz && ir < d_simparam.nr) { UINT32 addr = ir * d_simparam.nz + iz; if (addr != last_addr) { #ifndef USE_TRUE_CACHE // Commit the weight drop to memory. if (last_ir < MAX_IR && last_iz < MAX_IZ) { // Write it to the shared memory. last_addr = last_ir * MAX_IZ + last_iz; AtomicAddULL_Shared(&A_rz_shared[last_addr], last_w); } else #endif { // Write it to the global memory directly. atomicAdd(&g_A_rz[last_addr], (UINT64)last_w); } last_ir = ir; last_iz = iz; last_addr = addr; // Reset the last weight. last_w = 0; } // Accumulate to the last weight. last_w += (UINT32)(dwa * WEIGHT_SCALE); } } //>>>>>>>>> end of Drop() Spin(d_layerspecs[photon_layer].g, &photon_ux, &photon_uy, &photon_uz, &rnd_x, &rnd_a); } /*********************************************************** * >>>>>>>>> Roulette() * If the photon weight is small, the photon packet tries * to survive a roulette. 
****/ if (photon_w < WEIGHT) { FLOAT rand = rand_MWC_co(&rnd_x, &rnd_a); if (photon_w != MCML_FP_ZERO && rand < CHANCE) // This photon survives the roulette. { photon_w *= (FP_ONE / CHANCE); } // This photon is terminated. else if (atomicSub(d_state.n_photons_left, 1) > NUM_THREADS) { // Launch a new photon. LaunchPhoton(&photon_x, &photon_y, &photon_z, &photon_ux, &photon_uy, &photon_uz, &photon_w, &photon_sleft, &photon_layer); } else { // No need to process any more photons. is_active = 0; } } } ////////////////////////////////////////////////////////////////////// } // end of the main loop __syncthreads(); ////////////////////////////////////////////////////////////////////////// if (ignoreAdetection == 0) { // Commit the last weight drop to the global memory directly. // NOTE: last_w == 0 if inactive. if (last_w > 0) { UINT32 global_addr = last_ir * d_simparam.nz + last_iz; atomicAdd(&g_A_rz[global_addr], last_w); } #ifndef USE_TRUE_CACHE // Flush A_rz_shared to the global memory. for (int i = threadIdx.x; i < MAX_IR*MAX_IZ; i += NUM_THREADS_PER_BLOCK) { Flush_Arz(g_A_rz, A_rz_shared, i); } #endif } ////////////////////////////////////////////////////////////////////////// // Save the thread state to the global memory. 
SaveThreadState(&d_state, &tstates, photon_x, photon_y, photon_z, photon_ux, photon_uy, photon_uz, photon_w, photon_sleft, photon_layer, rnd_x, rnd_a, is_active); } ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// //__global__ void sum_A_rz(UINT64 *g_A_rz) //{ // UINT64 sum; // // int n_elems = d_simparam.nz * d_simparam.nr; // int base_ofst, ofst; // // for (base_ofst = blockIdx.x * blockDim.x + threadIdx.x; // base_ofst < n_elems; base_ofst += blockDim.x * gridDim.x) // { // sum = 0; // ofst = base_ofst; //#pragma unroll // for (int i = 0; i < N_A_RZ_COPIES; ++i) // { // sum += g_A_rz[ofst]; // ofst += n_elems; // } // g_A_rz[base_ofst] = sum; // } //} #endif // _CUDAMCML_KERNEL_CU_
027cbe675efb1b4317efa6d86a94559070e5c1df.hip
// !!! This is a file automatically generated by hipify!!! #pragma once #include "PdeFiniteDifference.cuh" namespace detail { /** * Evolve the solution using the time discretizer. * N.B.: solution is a memory tile, as some solver might require the solution history * N.B.2: if provided, workBuffer is a previously allocated buffer used for matrix-vector multiplication */ int _Advance(MemoryTile solution, const MemoryCube timeDiscretizer, MemoryTile workBuffer, const bool overwriteBuffer) { // this is to support multi-step algorithms: each solution is multiplied by a different time discretizer MemoryBuffer _solution(solution.pointer, solution.nRows, solution.memorySpace, solution.mathDomain); MemoryBuffer _buffer(workBuffer.pointer, workBuffer.nRows, workBuffer.memorySpace, workBuffer.mathDomain); MemoryTile _timeDiscretizer(timeDiscretizer.pointer, timeDiscretizer.nRows, timeDiscretizer.nCols, timeDiscretizer.memorySpace, timeDiscretizer.mathDomain); // work out where to write the matrix-vector dot-product MemoryBuffer *_out, *_in; if (overwriteBuffer) { _out = &_buffer; _in = &_solution; } else { _in = &_buffer; _out = &_solution; } const ptr_t inPtr = _in->pointer; const ptr_t outPtr = _out->pointer; // multiplicate each solution with the respective time discretizer for (unsigned i = 0; i < solution.nCols; ++i) { _buffer.pointer = workBuffer.pointer + i * _buffer.TotalSize(); _solution.pointer = solution.pointer + i * _solution.TotalSize(); _timeDiscretizer.pointer = timeDiscretizer.pointer + i * _timeDiscretizer.TotalSize(); _Dot(*_out, _timeDiscretizer, *_in); } int err = hipGetLastError(); // add the partial results into the latest solution for (unsigned i = 1; i < solution.nCols; ++i) { // cumulative sum of each step contribution into the first column _out->pointer = outPtr; _in->pointer = outPtr + i * _in->TotalSize(); // re-use _in for convenience! 
_AddEqual(*_out, *_in); err = hipGetLastError(); // copy the input solution into the older solution buffers _out->pointer = _in->pointer; _in->pointer = inPtr + i * _in->TotalSize(); _DeviceToDeviceCopy(*_out, *_in); err = hipGetLastError(); } return hipGetLastError(); } int _MakeRungeKuttaGaussLegendre(const double dt, const MemoryTile& spaceDiscretizer, const MemoryTile& timeDiscretizer) { constexpr double a00 = { .25 }; constexpr double sqrt3 = { 1.73205080756888 }; constexpr double a01 = { .25 - sqrt3 / 6.0 }; constexpr double a10 = { .25 + sqrt3 / 6.0 }; constexpr double a11 = { .25 }; MemoryTile eye(timeDiscretizer); _Alloc(eye); _Eye(eye); MemoryTile A(timeDiscretizer); _Alloc(A); _Add(A, eye, spaceDiscretizer, -a00 * dt); MemoryTile B(timeDiscretizer); _Alloc(B); _DeviceToDeviceCopy(B, spaceDiscretizer); _Solve(A, B); _Scale(B, a10 * dt); MemoryTile C(timeDiscretizer); _Alloc(C); _DeviceToDeviceCopy(C, B); _Scale(C, a01 * dt); _AddEqualMatrix(C, eye, MatrixOperation::None, MatrixOperation::None, a11 * dt); MemoryTile C2(timeDiscretizer); _Alloc(C2); _DeviceToDeviceCopy(C2, C); _Multiply(C, spaceDiscretizer, C2, spaceDiscretizer.nRows, C2.nRows); _Free(C2); MemoryTile D(timeDiscretizer); _Alloc(D); _Add(D, C, eye, -1); MemoryTile E(timeDiscretizer); _Alloc(E); _Add(E, eye, B); MemoryTile k_2(timeDiscretizer); _Alloc(k_2); _Multiply(k_2, spaceDiscretizer, E, spaceDiscretizer.nRows, E.nRows); _Solve(D, k_2); MemoryTile F(timeDiscretizer); _Alloc(F); _Add(F, eye, k_2, a01 * dt); MemoryTile k_1(timeDiscretizer); _Alloc(k_1); _Multiply(k_1, spaceDiscretizer, F, spaceDiscretizer.nRows, E.nRows); _Solve(A, k_1); _Eye(timeDiscretizer); _AddEqualMatrix(k_1, k_2); _AddEqualMatrix(timeDiscretizer, k_1, MatrixOperation::None, MatrixOperation::None, .5 * dt); _Free(eye); _Free(A); _Free(B); _Free(C); _Free(D); _Free(E); _Free(F); _Free(k_1); _Free(k_2); return hipGetLastError(); } } EXTERN_C { EXPORT int _MakeTimeDiscretizerAdvectionDiffusion(MemoryCube timeDiscretizer, 
const MemoryTile spaceDiscretizer, const SolverType solverType, const double dt) { MemoryTile _timeDiscretizer; extractMatrixBufferFromCube(_timeDiscretizer, timeDiscretizer, 0); switch (solverType) { case SolverType::ExplicitEuler: // A = I + L * dt assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, dt); break; case SolverType::ImplicitEuler: // A = (I - L * dt)^(-1) assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt); _Invert(_timeDiscretizer); break; case SolverType::CrankNicolson: { // A = (I - L * .5 * dt)^(-1) * (I + L * .5 * dt) assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); // copy timeDiscretizer into leftOperator volatile buffer MemoryTile leftOperator(_timeDiscretizer); _Alloc(leftOperator); _DeviceToDeviceCopy(leftOperator, _timeDiscretizer); // left and right operator _AddEqualMatrix(leftOperator, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.5 * dt); // A = I - .5 * dt _AddEqualMatrix(timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, .5 * dt); // B = I + .5 * dt _Solve(leftOperator, _timeDiscretizer); _Free(leftOperator); } break; case SolverType::RungeKuttaRalston: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<2>({ 0, 2.0 / 3.0, 0 }, { .25, .75 }, dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RungeKutta3: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<3>({ 0, .5, .0, -1, 2, 0 }, { 1.0 / 6.0, 2.0 / 3.0, 1.0 / 6.0 }, dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RungeKutta4: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<4>({ 0, .5, .0, 0, .5, 0, 0, 0, 1, 0 }, { 1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0 }, dt, spaceDiscretizer, _timeDiscretizer); break; case 
SolverType::RungeKuttaThreeEight: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<4>({ 0, 1.0 / 3.0, .0, -1.0 / 3.0, 1, 0, 1, -1, 1, 0 }, { 1.0 / 8.0, 3.0 / 8.0, 3.0 / 8.0, 1.0 / 8.0 }, dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RungeKuttaGaussLegendre4: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaGaussLegendre(dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RichardsonExtrapolation2: { assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt); _Invert(_timeDiscretizer); _Scale(timeDiscretizer, -1.0); MemoryTile halfIteration(_timeDiscretizer); _Alloc(halfIteration); _Eye(halfIteration); _AddEqualMatrix(halfIteration, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.5 * dt); MemoryTile halfIterationSquared(_timeDiscretizer); _Alloc(halfIterationSquared); _Multiply(halfIterationSquared, halfIteration, halfIteration, halfIteration.nRows, halfIteration.nRows); _Invert(halfIterationSquared); _AddEqualMatrix(_timeDiscretizer, halfIterationSquared, MatrixOperation::None, MatrixOperation::None, 2.0); _Free(halfIteration); _Free(halfIterationSquared); } break; case SolverType::RichardsonExtrapolation3: { assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt); _Invert(_timeDiscretizer); // - F _Scale(_timeDiscretizer, -1.0); MemoryTile halfIteration(_timeDiscretizer); _Alloc(halfIteration); _Eye(halfIteration); _AddEqualMatrix(halfIteration, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.5 * dt); MemoryTile halfIterationSquared(_timeDiscretizer); _Alloc(halfIterationSquared); _Multiply(halfIterationSquared, halfIteration, halfIteration, halfIteration.nRows, halfIteration.nRows); _Invert(halfIterationSquared); // H MemoryTile 
quarterIteration(_timeDiscretizer); _Alloc(quarterIteration); _Eye(quarterIteration); _AddEqualMatrix(quarterIteration, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.25 * dt); MemoryTile quarterIterationFour(_timeDiscretizer); _Alloc(quarterIterationFour); _Multiply(halfIteration, quarterIteration, quarterIteration, quarterIteration.nRows, quarterIteration.nRows); // re-use halfIteration for convenience _Multiply(quarterIterationFour, halfIteration, halfIteration, halfIteration.nRows, halfIteration.nRows); _Invert(quarterIterationFour); // Q // 2 * H - F _AddEqualMatrix(_timeDiscretizer, halfIterationSquared, MatrixOperation::None, MatrixOperation::None, 2.0); // -(2 * H - F) / 3 _Scale(_timeDiscretizer, -1.0 / 3.0); // 2 * Q - H _Scale(halfIterationSquared, -1); _AddEqualMatrix(halfIterationSquared, quarterIterationFour, MatrixOperation::None, MatrixOperation::None, 2.0); // (2 * Q - H) * 4/3 - (2 * H - F) / 3 _AddEqualMatrix(_timeDiscretizer, halfIterationSquared, MatrixOperation::None, MatrixOperation::None, 4.0 / 3.0); _Free(halfIteration); _Free(halfIterationSquared); _Free(quarterIteration); _Free(quarterIterationFour); } break; case SolverType::AdamsBashforth2: // A_{n + 1} = (I + L * 1.5 * dt) assert(timeDiscretizer.nCubes == 2); _Eye(_timeDiscretizer); _AddEqual(_timeDiscretizer, spaceDiscretizer, 1.5 * dt); // A = I + 1.5 * dt // A_{n} = - L * .5 * dt _timeDiscretizer.pointer += _timeDiscretizer.nRows * _timeDiscretizer.nCols * _timeDiscretizer.ElementarySize(); _DeviceToDeviceCopy(_timeDiscretizer, spaceDiscretizer); _Scale(_timeDiscretizer, -.5 * dt); break; case SolverType::AdamsMouldon2: { // A_{n + 1} = (I - L * 5 / 12 * dt)^(-1) * (I + L * 2.0 / 3.0 * dt) assert(timeDiscretizer.nCubes == 2); // copy timeDiscretizer into leftOperator volatile buffer MemoryTile leftOperator(_timeDiscretizer); _Alloc(leftOperator); _Eye(leftOperator); _AddEqual(leftOperator, spaceDiscretizer, -5.0 / 12.0 * dt); // A = I - .5 * dt 
_Eye(_timeDiscretizer); _AddEqual(_timeDiscretizer, spaceDiscretizer, 2.0 / 3.0 * dt); // A = I - .5 * dt _Solve(leftOperator, _timeDiscretizer); // A_{n} = (I - L * 5 / 12 * dt)^(-1) * (- L * 1.0 / 12.0 * dt) _timeDiscretizer.pointer += _timeDiscretizer.nRows * _timeDiscretizer.nCols * _timeDiscretizer.ElementarySize(); _DeviceToDeviceCopy(_timeDiscretizer, spaceDiscretizer); _Scale(_timeDiscretizer, -1.0 / 12.0 * dt); _Solve(leftOperator, _timeDiscretizer); } break; default: return CudaKernelException::_NotImplementedException; } return hipGetLastError(); } EXPORT int _MakeTimeDiscretizerWaveEquation(MemoryCube timeDiscretizer, const MemoryTile spaceDiscretizer, const SolverType solverType, const double dt) { MemoryTile _timeDiscretizer; extractMatrixBufferFromCube(_timeDiscretizer, timeDiscretizer, 0); switch (solverType) { case SolverType::ExplicitEuler: { // A = I assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); break; } case SolverType::ImplicitEuler: { // A = (I - L * dt^2)^(-1) assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt * dt); _Invert(_timeDiscretizer); break; } default: return CudaKernelException::_NotImplementedException; } return hipGetLastError(); } }
027cbe675efb1b4317efa6d86a94559070e5c1df.cu
#pragma once #include "PdeFiniteDifference.cuh" namespace detail { /** * Evolve the solution using the time discretizer. * N.B.: solution is a memory tile, as some solver might require the solution history * N.B.2: if provided, workBuffer is a previously allocated buffer used for matrix-vector multiplication */ int _Advance(MemoryTile solution, const MemoryCube timeDiscretizer, MemoryTile workBuffer, const bool overwriteBuffer) { // this is to support multi-step algorithms: each solution is multiplied by a different time discretizer MemoryBuffer _solution(solution.pointer, solution.nRows, solution.memorySpace, solution.mathDomain); MemoryBuffer _buffer(workBuffer.pointer, workBuffer.nRows, workBuffer.memorySpace, workBuffer.mathDomain); MemoryTile _timeDiscretizer(timeDiscretizer.pointer, timeDiscretizer.nRows, timeDiscretizer.nCols, timeDiscretizer.memorySpace, timeDiscretizer.mathDomain); // work out where to write the matrix-vector dot-product MemoryBuffer *_out, *_in; if (overwriteBuffer) { _out = &_buffer; _in = &_solution; } else { _in = &_buffer; _out = &_solution; } const ptr_t inPtr = _in->pointer; const ptr_t outPtr = _out->pointer; // multiplicate each solution with the respective time discretizer for (unsigned i = 0; i < solution.nCols; ++i) { _buffer.pointer = workBuffer.pointer + i * _buffer.TotalSize(); _solution.pointer = solution.pointer + i * _solution.TotalSize(); _timeDiscretizer.pointer = timeDiscretizer.pointer + i * _timeDiscretizer.TotalSize(); _Dot(*_out, _timeDiscretizer, *_in); } int err = cudaGetLastError(); // add the partial results into the latest solution for (unsigned i = 1; i < solution.nCols; ++i) { // cumulative sum of each step contribution into the first column _out->pointer = outPtr; _in->pointer = outPtr + i * _in->TotalSize(); // re-use _in for convenience! 
_AddEqual(*_out, *_in); err = cudaGetLastError(); // copy the input solution into the older solution buffers _out->pointer = _in->pointer; _in->pointer = inPtr + i * _in->TotalSize(); _DeviceToDeviceCopy(*_out, *_in); err = cudaGetLastError(); } return cudaGetLastError(); } int _MakeRungeKuttaGaussLegendre(const double dt, const MemoryTile& spaceDiscretizer, const MemoryTile& timeDiscretizer) { constexpr double a00 = { .25 }; constexpr double sqrt3 = { 1.73205080756888 }; constexpr double a01 = { .25 - sqrt3 / 6.0 }; constexpr double a10 = { .25 + sqrt3 / 6.0 }; constexpr double a11 = { .25 }; MemoryTile eye(timeDiscretizer); _Alloc(eye); _Eye(eye); MemoryTile A(timeDiscretizer); _Alloc(A); _Add(A, eye, spaceDiscretizer, -a00 * dt); MemoryTile B(timeDiscretizer); _Alloc(B); _DeviceToDeviceCopy(B, spaceDiscretizer); _Solve(A, B); _Scale(B, a10 * dt); MemoryTile C(timeDiscretizer); _Alloc(C); _DeviceToDeviceCopy(C, B); _Scale(C, a01 * dt); _AddEqualMatrix(C, eye, MatrixOperation::None, MatrixOperation::None, a11 * dt); MemoryTile C2(timeDiscretizer); _Alloc(C2); _DeviceToDeviceCopy(C2, C); _Multiply(C, spaceDiscretizer, C2, spaceDiscretizer.nRows, C2.nRows); _Free(C2); MemoryTile D(timeDiscretizer); _Alloc(D); _Add(D, C, eye, -1); MemoryTile E(timeDiscretizer); _Alloc(E); _Add(E, eye, B); MemoryTile k_2(timeDiscretizer); _Alloc(k_2); _Multiply(k_2, spaceDiscretizer, E, spaceDiscretizer.nRows, E.nRows); _Solve(D, k_2); MemoryTile F(timeDiscretizer); _Alloc(F); _Add(F, eye, k_2, a01 * dt); MemoryTile k_1(timeDiscretizer); _Alloc(k_1); _Multiply(k_1, spaceDiscretizer, F, spaceDiscretizer.nRows, E.nRows); _Solve(A, k_1); _Eye(timeDiscretizer); _AddEqualMatrix(k_1, k_2); _AddEqualMatrix(timeDiscretizer, k_1, MatrixOperation::None, MatrixOperation::None, .5 * dt); _Free(eye); _Free(A); _Free(B); _Free(C); _Free(D); _Free(E); _Free(F); _Free(k_1); _Free(k_2); return cudaGetLastError(); } } EXTERN_C { EXPORT int _MakeTimeDiscretizerAdvectionDiffusion(MemoryCube 
timeDiscretizer, const MemoryTile spaceDiscretizer, const SolverType solverType, const double dt) { MemoryTile _timeDiscretizer; extractMatrixBufferFromCube(_timeDiscretizer, timeDiscretizer, 0); switch (solverType) { case SolverType::ExplicitEuler: // A = I + L * dt assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, dt); break; case SolverType::ImplicitEuler: // A = (I - L * dt)^(-1) assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt); _Invert(_timeDiscretizer); break; case SolverType::CrankNicolson: { // A = (I - L * .5 * dt)^(-1) * (I + L * .5 * dt) assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); // copy timeDiscretizer into leftOperator volatile buffer MemoryTile leftOperator(_timeDiscretizer); _Alloc(leftOperator); _DeviceToDeviceCopy(leftOperator, _timeDiscretizer); // left and right operator _AddEqualMatrix(leftOperator, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.5 * dt); // A = I - .5 * dt _AddEqualMatrix(timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, .5 * dt); // B = I + .5 * dt _Solve(leftOperator, _timeDiscretizer); _Free(leftOperator); } break; case SolverType::RungeKuttaRalston: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<2>({ 0, 2.0 / 3.0, 0 }, { .25, .75 }, dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RungeKutta3: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<3>({ 0, .5, .0, -1, 2, 0 }, { 1.0 / 6.0, 2.0 / 3.0, 1.0 / 6.0 }, dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RungeKutta4: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<4>({ 0, .5, .0, 0, .5, 0, 0, 0, 1, 0 }, { 1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0 }, dt, spaceDiscretizer, 
_timeDiscretizer); break; case SolverType::RungeKuttaThreeEight: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaDiscretizer<4>({ 0, 1.0 / 3.0, .0, -1.0 / 3.0, 1, 0, 1, -1, 1, 0 }, { 1.0 / 8.0, 3.0 / 8.0, 3.0 / 8.0, 1.0 / 8.0 }, dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RungeKuttaGaussLegendre4: assert(timeDiscretizer.nCubes == 1); detail::_MakeRungeKuttaGaussLegendre(dt, spaceDiscretizer, _timeDiscretizer); break; case SolverType::RichardsonExtrapolation2: { assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt); _Invert(_timeDiscretizer); _Scale(timeDiscretizer, -1.0); MemoryTile halfIteration(_timeDiscretizer); _Alloc(halfIteration); _Eye(halfIteration); _AddEqualMatrix(halfIteration, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.5 * dt); MemoryTile halfIterationSquared(_timeDiscretizer); _Alloc(halfIterationSquared); _Multiply(halfIterationSquared, halfIteration, halfIteration, halfIteration.nRows, halfIteration.nRows); _Invert(halfIterationSquared); _AddEqualMatrix(_timeDiscretizer, halfIterationSquared, MatrixOperation::None, MatrixOperation::None, 2.0); _Free(halfIteration); _Free(halfIterationSquared); } break; case SolverType::RichardsonExtrapolation3: { assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt); _Invert(_timeDiscretizer); // - F _Scale(_timeDiscretizer, -1.0); MemoryTile halfIteration(_timeDiscretizer); _Alloc(halfIteration); _Eye(halfIteration); _AddEqualMatrix(halfIteration, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.5 * dt); MemoryTile halfIterationSquared(_timeDiscretizer); _Alloc(halfIterationSquared); _Multiply(halfIterationSquared, halfIteration, halfIteration, halfIteration.nRows, halfIteration.nRows); _Invert(halfIterationSquared); // H 
MemoryTile quarterIteration(_timeDiscretizer); _Alloc(quarterIteration); _Eye(quarterIteration); _AddEqualMatrix(quarterIteration, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -.25 * dt); MemoryTile quarterIterationFour(_timeDiscretizer); _Alloc(quarterIterationFour); _Multiply(halfIteration, quarterIteration, quarterIteration, quarterIteration.nRows, quarterIteration.nRows); // re-use halfIteration for convenience _Multiply(quarterIterationFour, halfIteration, halfIteration, halfIteration.nRows, halfIteration.nRows); _Invert(quarterIterationFour); // Q // 2 * H - F _AddEqualMatrix(_timeDiscretizer, halfIterationSquared, MatrixOperation::None, MatrixOperation::None, 2.0); // -(2 * H - F) / 3 _Scale(_timeDiscretizer, -1.0 / 3.0); // 2 * Q - H _Scale(halfIterationSquared, -1); _AddEqualMatrix(halfIterationSquared, quarterIterationFour, MatrixOperation::None, MatrixOperation::None, 2.0); // (2 * Q - H) * 4/3 - (2 * H - F) / 3 _AddEqualMatrix(_timeDiscretizer, halfIterationSquared, MatrixOperation::None, MatrixOperation::None, 4.0 / 3.0); _Free(halfIteration); _Free(halfIterationSquared); _Free(quarterIteration); _Free(quarterIterationFour); } break; case SolverType::AdamsBashforth2: // A_{n + 1} = (I + L * 1.5 * dt) assert(timeDiscretizer.nCubes == 2); _Eye(_timeDiscretizer); _AddEqual(_timeDiscretizer, spaceDiscretizer, 1.5 * dt); // A = I + 1.5 * dt // A_{n} = - L * .5 * dt _timeDiscretizer.pointer += _timeDiscretizer.nRows * _timeDiscretizer.nCols * _timeDiscretizer.ElementarySize(); _DeviceToDeviceCopy(_timeDiscretizer, spaceDiscretizer); _Scale(_timeDiscretizer, -.5 * dt); break; case SolverType::AdamsMouldon2: { // A_{n + 1} = (I - L * 5 / 12 * dt)^(-1) * (I + L * 2.0 / 3.0 * dt) assert(timeDiscretizer.nCubes == 2); // copy timeDiscretizer into leftOperator volatile buffer MemoryTile leftOperator(_timeDiscretizer); _Alloc(leftOperator); _Eye(leftOperator); _AddEqual(leftOperator, spaceDiscretizer, -5.0 / 12.0 * dt); // A = I - .5 * dt 
_Eye(_timeDiscretizer); _AddEqual(_timeDiscretizer, spaceDiscretizer, 2.0 / 3.0 * dt); // A = I - .5 * dt _Solve(leftOperator, _timeDiscretizer); // A_{n} = (I - L * 5 / 12 * dt)^(-1) * (- L * 1.0 / 12.0 * dt) _timeDiscretizer.pointer += _timeDiscretizer.nRows * _timeDiscretizer.nCols * _timeDiscretizer.ElementarySize(); _DeviceToDeviceCopy(_timeDiscretizer, spaceDiscretizer); _Scale(_timeDiscretizer, -1.0 / 12.0 * dt); _Solve(leftOperator, _timeDiscretizer); } break; default: return CudaKernelException::_NotImplementedException; } return cudaGetLastError(); } EXPORT int _MakeTimeDiscretizerWaveEquation(MemoryCube timeDiscretizer, const MemoryTile spaceDiscretizer, const SolverType solverType, const double dt) { MemoryTile _timeDiscretizer; extractMatrixBufferFromCube(_timeDiscretizer, timeDiscretizer, 0); switch (solverType) { case SolverType::ExplicitEuler: { // A = I assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); break; } case SolverType::ImplicitEuler: { // A = (I - L * dt^2)^(-1) assert(timeDiscretizer.nCubes == 1); _Eye(_timeDiscretizer); _AddEqualMatrix(_timeDiscretizer, spaceDiscretizer, MatrixOperation::None, MatrixOperation::None, -dt * dt); _Invert(_timeDiscretizer); break; } default: return CudaKernelException::_NotImplementedException; } return cudaGetLastError(); } }
1bc4535b714e0d28829658fd90dcd03e6ba9f943.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/linspace_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/data_type_transform.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { template <typename T> __global__ void LinspaceKernelInner( T start, T stop, double step, int64_t size, T* out) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < size; index += blockDim.x * gridDim.x) { if (index < size / 2) { out[index] = static_cast<T>(start + step * index); } else { out[index] = static_cast<T>(stop - step * (size - index - 1)); } } } template <typename T> __global__ void LinspaceSpecialKernel(T start, T* out) { out[0] = static_cast<T>(start); } template <typename T, typename Context> void LinspaceKernel(const Context& ctx, const DenseTensor& start, const DenseTensor& stop, const DenseTensor& number, DataType dtype, DenseTensor* out) { auto start_t = phi::funcs::TransDataType(ctx, start, dtype); auto stop_t = phi::funcs::TransDataType(ctx, stop, dtype); DenseTensor n_start; DenseTensor n_stop; DenseTensor n_num; phi::Copy(ctx, start_t, phi::CPUPlace(), false, 
&n_start); T start_data = n_start.data<T>()[0]; phi::Copy(ctx, stop_t, phi::CPUPlace(), false, &n_stop); T stop_data = n_stop.data<T>()[0]; phi::Copy(ctx, number, phi::CPUPlace(), false, &n_num); int64_t num = static_cast<int64_t>(n_num.data<int32_t>()[0]); PADDLE_ENFORCE_GT( num, 0, phi::errors::InvalidArgument("The num of linspace op should be larger " "than 0, but received num is %d", num)); out->Resize(phi::make_ddim({num})); T* out_data = ctx.template Alloc<T>(out); double step = 0; auto stream = ctx.stream(); int block = 512; int grid = (num + block - 1) / block; if (num != 1) { step = (static_cast<double>(stop_data - start_data)) / (num - 1); hipLaunchKernelGGL(( LinspaceKernelInner<T>), dim3(grid), dim3(block), 0, stream, start_data, stop_data, step, num, out_data); } else { hipLaunchKernelGGL(( LinspaceSpecialKernel<T>), dim3(grid), dim3(block), 0, stream, start_data, out_data); } } } // namespace phi PD_REGISTER_KERNEL(linspace, GPU, ALL_LAYOUT, phi::LinspaceKernel, float, int32_t, int64_t, double) {}
1bc4535b714e0d28829658fd90dcd03e6ba9f943.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/linspace_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/data_type_transform.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { template <typename T> __global__ void LinspaceKernelInner( T start, T stop, double step, int64_t size, T* out) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < size; index += blockDim.x * gridDim.x) { if (index < size / 2) { out[index] = static_cast<T>(start + step * index); } else { out[index] = static_cast<T>(stop - step * (size - index - 1)); } } } template <typename T> __global__ void LinspaceSpecialKernel(T start, T* out) { out[0] = static_cast<T>(start); } template <typename T, typename Context> void LinspaceKernel(const Context& ctx, const DenseTensor& start, const DenseTensor& stop, const DenseTensor& number, DataType dtype, DenseTensor* out) { auto start_t = phi::funcs::TransDataType(ctx, start, dtype); auto stop_t = phi::funcs::TransDataType(ctx, stop, dtype); DenseTensor n_start; DenseTensor n_stop; DenseTensor n_num; phi::Copy(ctx, start_t, phi::CPUPlace(), false, &n_start); T start_data = n_start.data<T>()[0]; phi::Copy(ctx, stop_t, phi::CPUPlace(), false, 
&n_stop); T stop_data = n_stop.data<T>()[0]; phi::Copy(ctx, number, phi::CPUPlace(), false, &n_num); int64_t num = static_cast<int64_t>(n_num.data<int32_t>()[0]); PADDLE_ENFORCE_GT( num, 0, phi::errors::InvalidArgument("The num of linspace op should be larger " "than 0, but received num is %d", num)); out->Resize(phi::make_ddim({num})); T* out_data = ctx.template Alloc<T>(out); double step = 0; auto stream = ctx.stream(); int block = 512; int grid = (num + block - 1) / block; if (num != 1) { step = (static_cast<double>(stop_data - start_data)) / (num - 1); LinspaceKernelInner<T><<<grid, block, 0, stream>>>( start_data, stop_data, step, num, out_data); } else { LinspaceSpecialKernel<T><<<grid, block, 0, stream>>>(start_data, out_data); } } } // namespace phi PD_REGISTER_KERNEL(linspace, GPU, ALL_LAYOUT, phi::LinspaceKernel, float, int32_t, int64_t, double) {}
39ecd47e7f3f834804546c0dbf94cc23131b2fae.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example is almost the same as example 27 which uses 3xTF32 to run GEMM. 
The only difference is that this example uses 3xtf32 on complex gemm. To enable this feature, the only change needs to make is to change OpMultiplyAddComplex to OpMultiplyAddComplexFastF32. */ #include <iostream> #include <vector> #include <limits> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_complex.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_reduce.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; hipError_t error; int m, n, k; double l2_norm_3xtf32_vs_fp64; double l2_norm_1xtf32_vs_fp64; double l2_norm_fp32_vs_fp64; // ctor Result( int m, int n, int k, double runtime_ms, double gflops, double l2_norm_3xtf32_vs_fp64, double l2_norm_1xtf32_vs_fp64, double l2_norm_fp32_vs_fp64) : m(m), n(n), k(k), runtime_ms(runtime_ms), gflops(gflops), l2_norm_3xtf32_vs_fp64(l2_norm_3xtf32_vs_fp64), l2_norm_1xtf32_vs_fp64(l2_norm_1xtf32_vs_fp64), l2_norm_fp32_vs_fp64(l2_norm_fp32_vs_fp64) {} Result() {} // // Methods // static void print_csv_header() { std::cout << "M,N,K,Runtime(ms),GFLOPS,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64" << std::endl; } void print_csv_row() { std::cout << m << "," << n << "," << k << "," << runtime_ms << "," << gflops << "," << l2_norm_3xtf32_vs_fp64 << "," << l2_norm_1xtf32_vs_fp64 << "," << l2_norm_fp32_vs_fp64 << std::endl; } }; std::vector<Result> results; 
/////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; std::string rand_mode; int iterations; int seed; bool benchmark; Options(): help(false), problem_size({3456, 4096, 4096}), iterations(20), seed(1), alpha(1), beta(), rand_mode("uniform"), benchmark(false) { } bool valid() { return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("seed", seed); cmd.get_cmd_line_argument("rand_mode", rand_mode); if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm example\n\n" << " This example uses the CUTLASS Library to emulate FP32 complex GEMM computations with TF32 tensor cores.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --rand_mode=<string> gauss / uniform*\n\n" << " --seed=<int> Random number seed (1*)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n\n"; out << "\n\nExamples:\n\n" << "$ ./examples/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm/29_3xtf32_complex_gemm --m=1024 --n=512 \\\n" << " --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = problem_size.product(); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // The code section below describes matrix layout of input and output matrices. 
Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<64, 64, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<32, 32, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< cutlass::complex<float>, // <- data type of output matrix 1, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too cutlass::complex<float>, // <- data type of accumulator cutlass::complex<float>>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; // Transform constexpr cutlass::ComplexTransform TransformA = cutlass::ComplexTransform::kNone; constexpr cutlass::ComplexTransform TransformB = cutlass::ComplexTransform::kNone; // // Gemm Operators (Gemm_3xTF32, Gemm_1xTF32, GEMM_F32, GEMM_F64) // // Gemm_3xTF32 using Gemm_3xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplexFastF32>; // Gemm_1xTF32 using Gemm_1xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplex>; bool run(Options &options) { // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size = options.problem_size; //////////////////////////////////////////////////////////////////////////////// /// 1. 
Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// cutlass::HostTensor<cutlass::complex<float>, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<float>, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N if (options.rand_mode == "uniform") { const float min = -1; const float max = 1; // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix C on host with uniform-distribution random data } else if (options.rand_mode == "gauss") { // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomGaussian( tensor_a_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix A on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_b_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix B on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_c_F32.host_view(), options.seed, double(0), double(5)); // 
<- Fill matrix C on host with gaussian-distribution random data } cutlass::reference::host::TensorFill( tensor_d_F32.host_view()); // <- fill matrix D on host with zeros // Copy data from host to GPU tensor_a_F32.sync_device(); tensor_b_F32.sync_device(); tensor_c_F32.sync_device(); tensor_d_F32.sync_device(); //////////////////////////////////////////////////////////////////////////////// /// 2. Initialize F64 tensors using the same values used for F32 //////////////////////////////////////////////////////////////////////////////// // Gemm input operands (A, B, C) cutlass::HostTensor<cutlass::complex<double>, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<double>, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N // Gemm output (D) for GEMM_F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_3xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_1xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Copy values from the DP tensors cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view()); 
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view()); // Copy data from host to GPU tensor_a_F64.sync_device(); tensor_b_F64.sync_device(); tensor_c_F64.sync_device(); tensor_d_F64.sync_device(); tensor_d_3xTF32.sync_device(); tensor_d_1xTF32.sync_device(); // Initialize alpha and beta for dot product computation cutlass::complex<float> alpha = cutlass::complex<float>(options.alpha); cutlass::complex<float> beta = cutlass::complex<float>(options.beta); // Split K dimension into 1 partitions int split_k_slices = 1; //////////////////////////////////////////////////////////////////////////////// /// 3. Run 3xTF32 kernel within a profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_3xTF32::Arguments arguments_3xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_3xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_3xtf32 = Gemm_3xTF32::get_workspace_size(arguments_3xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_3xTF32 gemm_op; // Check the problem size is supported or not cutlass::Status status_3xtf32 = gemm_op.can_implement(arguments_3xtf32); CUTLASS_CHECK(status_3xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_3xtf32 = gemm_op.initialize(arguments_3xtf32, 
workspace_3xtf32.get()); CUTLASS_CHECK(status_3xtf32); // Result structure Result result; // // Construct events // hipEvent_t events[2]; for (auto & event : events) { result.error = hipEventCreate(&event); if (result.error != hipSuccess) { std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl; return false; } } // Record an event at the start of a series of GEMMs result.error = hipEventRecord(events[0]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { // Launch initialized CUTLASS kernel status_3xtf32 = gemm_op(); CUTLASS_CHECK(status_3xtf32); } // // Stop profiling loop // // Record an event when the GEMMs are complete result.error = hipEventRecord(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // Wait for work on the device to complete. result.error = hipEventSynchronize(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // Measure elapsed runtime float runtime_ms = 0; result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != hipSuccess) { std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // Compute average runtime and GFLOPs. result.m = problem_size.m(); result.n = problem_size.n(); result.k = problem_size.k(); result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)hipEventDestroy(event); } tensor_d_3xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 4. 
Run TF32 kernel without profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_1xTF32::Arguments arguments_1xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_1xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_1xtf32 = Gemm_1xTF32::get_workspace_size(arguments_1xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_1xTF32 gemm_op_1xtf32; // Check the problem size is supported or not cutlass::Status status_1xtf32 = gemm_op_1xtf32.can_implement(arguments_1xtf32); CUTLASS_CHECK(status_1xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_1xtf32 = gemm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get()); CUTLASS_CHECK(status_1xtf32); // Launch initialized CUTLASS kernel status_1xtf32 = gemm_op_1xtf32(); CUTLASS_CHECK(status_1xtf32); tensor_d_1xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F64) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F64.device_ref(), TransformA, tensor_b_F64.device_ref(), TransformB, beta, tensor_c_F64.device_ref(), tensor_d_F64.device_ref(), 
cutlass::complex<double>(0.f)); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F64.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F32) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F32.device_ref(), TransformA, tensor_b_F32.device_ref(), TransformB, beta, tensor_c_F32.device_ref(), tensor_d_F32.device_ref(), cutlass::complex<float>(0.f)); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /////// Compute l2 norms //////////////////////////////////////////////////////////////////////////////// // l2 norm 3xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view()); result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm 1xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view()); result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm F32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view()); result.l2_norm_fp32_vs_fp64 
= cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view()); results.push_back(result); /////////////////////////////////////////////////////////////////////////////// // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << std::fixed; std::cout.precision(4); std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout.precision(2); std::cout << "GFLOPs: " << result.gflops << std::endl; std::cout << "Normalized L2 norm of" << std::endl; std::cout.precision(8); std::cout << std::scientific << " - 3xTF32 error with FP64 reference : " << result.l2_norm_3xtf32_vs_fp64 << std::endl << " - 1xTF32 error with FP64 reference : " << result.l2_norm_1xtf32_vs_fp64 << std::endl << " - FP32 error with FP64 reference : " << result.l2_norm_fp32_vs_fp64 << std::endl; return true; } int main(int argc, const char **argv) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return false; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } bool result = true; if (options.benchmark) { for (int k = 4; k <= 65536; k *= 2) { options.problem_size[2] = k; printf("Gemm problem size: %d x %d x %d\n", \ options.problem_size.m(), options.problem_size.n(), options.problem_size.k()); if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result &= run(options); } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result = run(options); } if (!result) return -1; std::cout << std::endl << "CSV results" << std::endl; Result::print_csv_header(); for(auto &r : results) r.print_csv_row(); return 0; }
39ecd47e7f3f834804546c0dbf94cc23131b2fae.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example is almost the same as example 27 which uses 3xTF32 to run GEMM. The only difference is that this example uses 3xtf32 on complex gemm. 
To enable this feature, the only change needs to make is to change OpMultiplyAddComplex to OpMultiplyAddComplexFastF32. */ #include <iostream> #include <vector> #include <limits> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_complex.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_reduce.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; int m, n, k; double l2_norm_3xtf32_vs_fp64; double l2_norm_1xtf32_vs_fp64; double l2_norm_fp32_vs_fp64; // ctor Result( int m, int n, int k, double runtime_ms, double gflops, double l2_norm_3xtf32_vs_fp64, double l2_norm_1xtf32_vs_fp64, double l2_norm_fp32_vs_fp64) : m(m), n(n), k(k), runtime_ms(runtime_ms), gflops(gflops), l2_norm_3xtf32_vs_fp64(l2_norm_3xtf32_vs_fp64), l2_norm_1xtf32_vs_fp64(l2_norm_1xtf32_vs_fp64), l2_norm_fp32_vs_fp64(l2_norm_fp32_vs_fp64) {} Result() {} // // Methods // static void print_csv_header() { std::cout << "M,N,K,Runtime(ms),GFLOPS,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64" << std::endl; } void print_csv_row() { std::cout << m << "," << n << "," << k << "," << runtime_ms << "," << gflops << "," << l2_norm_3xtf32_vs_fp64 << "," << l2_norm_1xtf32_vs_fp64 << "," << l2_norm_fp32_vs_fp64 << std::endl; } }; std::vector<Result> results; /////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct 
Options { bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; std::string rand_mode; int iterations; int seed; bool benchmark; Options(): help(false), problem_size({3456, 4096, 4096}), iterations(20), seed(1), alpha(1), beta(), rand_mode("uniform"), benchmark(false) { } bool valid() { return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("seed", seed); cmd.get_cmd_line_argument("rand_mode", rand_mode); if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm example\n\n" << " This example uses the CUTLASS Library to emulate FP32 complex GEMM computations with TF32 tensor cores.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --rand_mode=<string> gauss / uniform*\n\n" << " --seed=<int> Random number seed (1*)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n\n"; out << "\n\nExamples:\n\n" << "$ ./examples/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm/29_3xtf32_complex_gemm --m=1024 --n=512 \\\n" << " --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) 
const { // Number of real-valued multiply-adds int64_t fmas = problem_size.product(); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<64, 64, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<32, 32, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< cutlass::complex<float>, // <- data type of output matrix 1, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too cutlass::complex<float>, // <- data type of accumulator cutlass::complex<float>>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; // Transform constexpr cutlass::ComplexTransform TransformA = cutlass::ComplexTransform::kNone; constexpr cutlass::ComplexTransform TransformB = cutlass::ComplexTransform::kNone; // // Gemm Operators (Gemm_3xTF32, Gemm_1xTF32, GEMM_F32, GEMM_F64) // // Gemm_3xTF32 using Gemm_3xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplexFastF32>; // Gemm_1xTF32 using Gemm_1xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplex>; bool run(Options &options) { // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size = options.problem_size; //////////////////////////////////////////////////////////////////////////////// /// 1. 
Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// cutlass::HostTensor<cutlass::complex<float>, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<float>, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N if (options.rand_mode == "uniform") { const float min = -1; const float max = 1; // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix C on host with uniform-distribution random data } else if (options.rand_mode == "gauss") { // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomGaussian( tensor_a_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix A on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_b_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix B on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_c_F32.host_view(), options.seed, double(0), double(5)); // 
<- Fill matrix C on host with gaussian-distribution random data } cutlass::reference::host::TensorFill( tensor_d_F32.host_view()); // <- fill matrix D on host with zeros // Copy data from host to GPU tensor_a_F32.sync_device(); tensor_b_F32.sync_device(); tensor_c_F32.sync_device(); tensor_d_F32.sync_device(); //////////////////////////////////////////////////////////////////////////////// /// 2. Initialize F64 tensors using the same values used for F32 //////////////////////////////////////////////////////////////////////////////// // Gemm input operands (A, B, C) cutlass::HostTensor<cutlass::complex<double>, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<double>, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N // Gemm output (D) for GEMM_F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_3xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_1xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Copy values from the DP tensors cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view()); 
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view()); // Copy data from host to GPU tensor_a_F64.sync_device(); tensor_b_F64.sync_device(); tensor_c_F64.sync_device(); tensor_d_F64.sync_device(); tensor_d_3xTF32.sync_device(); tensor_d_1xTF32.sync_device(); // Initialize alpha and beta for dot product computation cutlass::complex<float> alpha = cutlass::complex<float>(options.alpha); cutlass::complex<float> beta = cutlass::complex<float>(options.beta); // Split K dimension into 1 partitions int split_k_slices = 1; //////////////////////////////////////////////////////////////////////////////// /// 3. Run 3xTF32 kernel within a profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_3xTF32::Arguments arguments_3xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_3xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_3xtf32 = Gemm_3xTF32::get_workspace_size(arguments_3xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_3xTF32 gemm_op; // Check the problem size is supported or not cutlass::Status status_3xtf32 = gemm_op.can_implement(arguments_3xtf32); CUTLASS_CHECK(status_3xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_3xtf32 = gemm_op.initialize(arguments_3xtf32, 
workspace_3xtf32.get()); CUTLASS_CHECK(status_3xtf32); // Result structure Result result; // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } } // Record an event at the start of a series of GEMMs result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { // Launch initialized CUTLASS kernel status_3xtf32 = gemm_op(); CUTLASS_CHECK(status_3xtf32); } // // Stop profiling loop // // Record an event when the GEMMs are complete result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Compute average runtime and GFLOPs. result.m = problem_size.m(); result.n = problem_size.n(); result.k = problem_size.k(); result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } tensor_d_3xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 4. 
Run TF32 kernel without profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_1xTF32::Arguments arguments_1xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_1xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_1xtf32 = Gemm_1xTF32::get_workspace_size(arguments_1xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_1xTF32 gemm_op_1xtf32; // Check the problem size is supported or not cutlass::Status status_1xtf32 = gemm_op_1xtf32.can_implement(arguments_1xtf32); CUTLASS_CHECK(status_1xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_1xtf32 = gemm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get()); CUTLASS_CHECK(status_1xtf32); // Launch initialized CUTLASS kernel status_1xtf32 = gemm_op_1xtf32(); CUTLASS_CHECK(status_1xtf32); tensor_d_1xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F64) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F64.device_ref(), TransformA, tensor_b_F64.device_ref(), TransformB, beta, tensor_c_F64.device_ref(), tensor_d_F64.device_ref(), 
cutlass::complex<double>(0.f)); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F64.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F32) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F32.device_ref(), TransformA, tensor_b_F32.device_ref(), TransformB, beta, tensor_c_F32.device_ref(), tensor_d_F32.device_ref(), cutlass::complex<float>(0.f)); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /////// Compute l2 norms //////////////////////////////////////////////////////////////////////////////// // l2 norm 3xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view()); result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm 1xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view()); result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm F32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view()); 
result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view()); results.push_back(result); /////////////////////////////////////////////////////////////////////////////// // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << std::fixed; std::cout.precision(4); std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout.precision(2); std::cout << "GFLOPs: " << result.gflops << std::endl; std::cout << "Normalized L2 norm of" << std::endl; std::cout.precision(8); std::cout << std::scientific << " - 3xTF32 error with FP64 reference : " << result.l2_norm_3xtf32_vs_fp64 << std::endl << " - 1xTF32 error with FP64 reference : " << result.l2_norm_1xtf32_vs_fp64 << std::endl << " - FP32 error with FP64 reference : " << result.l2_norm_fp32_vs_fp64 << std::endl; return true; } int main(int argc, const char **argv) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return false; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } bool result = true; if (options.benchmark) { for (int k = 4; k <= 65536; k *= 2) { options.problem_size[2] = k; printf("Gemm problem size: %d x %d x %d\n", \ options.problem_size.m(), options.problem_size.n(), options.problem_size.k()); if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result &= run(options); } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result = run(options); } if (!result) return -1; std::cout << std::endl << "CSV results" << std::endl; Result::print_csv_header(); for(auto &r : results) r.print_csv_row(); return 0; }
e20b895e9e5bcae09dfd3e583dff74446f91decf.hip
// !!! This is a file automatically generated by hipify!!! #include "visualTur.hpp" #include "hdf5.h" #include <iostream> #include <fstream> #include <sstream> #include <sys/time.h> #define WHERESTR "Error[file "<<__FILE__<<", line "<<__LINE__<<"]: " int getnLevelFile(char * file_name, char * dataset_name) { hid_t file_id; hid_t dataset_id; hid_t spaceid; int ndim; hsize_t dims[3]; if ((file_id = H5Fopen(file_name, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { std::cerr<< WHERESTR<<" unable to open the requested file"<<std::endl; exit(0); } if ((dataset_id = H5Dopen1(file_id, dataset_name)) < 0 ) { std::cerr<<WHERESTR<<" unable to open the requested data set"<<std::endl; exit(0); } if ((spaceid = H5Dget_space(dataset_id)) < 0) { std::cerr<<WHERESTR<<" unable to open the requested data space"<<std::endl; exit(0); } if ((ndim = H5Sget_simple_extent_dims (spaceid, dims, NULL)) < 0) { std::cerr<<WHERESTR<<" handling file"<<std::endl; exit(0); } herr_t status; if ((status = H5Dclose(dataset_id)) < 0) { std::cerr<<WHERESTR<<" unable to close the data set"<<std::endl; exit(0); } if ((status = H5Fclose(file_id)) < 0); { std::cerr<<WHERESTR<<" unable to close the file"<<std::endl; } int dimension; if (dims[0]>dims[1] && dims[0]>dims[2]) dimension = dims[0]; else if (dims[1]>dims[2]) dimension = dims[1]; else dimension = dims[2]; /* Calcular dimension del rbol*/ float aux = logf(dimension)/logf(2.0); float aux2 = aux - floorf(aux); int nLevels = aux2>0.0 ? 
aux+1 : aux; return nLevels; } int main(int argc, char ** argv) { if (argc < 4) { std::cerr<<"Error, testVisualTur hdf5_file dataset_name octree_file [device]"<<std::endl; return 0; } int device = 0; if (argc > 4) { device = atoi(argv[4]); } int W = 1024; int H = 1024; int x,y,z; std::cout<<"Display Resolution:"<<std::endl; std::cout<<"Width: "; std::cin >> W; std::cout<<"Height: "; std::cin >> H; std::cout<<"Camera position (X,Y,Z):"<<std::endl; std::cout<<"X: "; std::cin >> x; std::cout<<"Y: "; std::cin >> y; std::cout<<"Z: "; std::cin >> z; int nLevel = getnLevelFile(argv[1], argv[2]); hipSetDevice(device); hipDeviceSetCacheConfig(hipFuncCachePreferL1); //get the amount of free memory on the graphics card size_t free; size_t total; hipMemGetInfo(&free, &total); visualTurParams_t params; params.W = W; params.H = H; params.fov_H = 35.0f; params.fov_W = 35.0f; params.distance = 50.0f; params.numRayPx = 1; params.maxElementsCache = 3*(free / (38*38*38*4)) /4; params.maxElementsCache_CPU = 5000; params.dimCubeCache = make_int3(32,32,32); params.cubeInc = 2; params.levelCubes = nLevel - 5; params.octreeLevel = (nLevel - 5) + 3; params.hdf5File = argv[1]; params.dataset_name = argv[2]; params.octreeFile = argv[3]; float * screenC = new float[H*W*4]; float * screenG = 0; std::cerr<<"Allocating memory octree CUDA screen: "<< hipGetErrorString(hipMalloc((void**)&screenG, sizeof(float)*H*W*4))<<std::endl; visualTur * VisualTur = new visualTur(params); VisualTur->camera_Move(make_float3(x,y,z)); struct timeval st, end; double totalT = 0.0; for(int m=0; m<1000; m++) { gettimeofday(&st, NULL); VisualTur->updateVisibleCubes(screenG); std::cerr<<"Retrieve screen from GPU: "<< hipGetErrorString(hipMemcpy((void*) screenC, (const void*) screenG, sizeof(float)*W*H*4, hipMemcpyDeviceToHost))<<std::endl; VisualTur->camera_StrafeRight(0.5f); gettimeofday(&end, NULL); double delta = ((end.tv_sec - st.tv_sec) * 1000000u + end.tv_usec - st.tv_usec) / 1.e6; std::cout << "Time elapsed 
iteration "<<m<<": " << delta << " sec"<< std::endl; totalT+=delta; } std::cout << "Time elapsed: " << totalT<< " sec to 1000 iterations"<< std::endl; std::cout << "Average time elapsed: " << totalT/1000.0f<< " sec"<< std::endl; hipFree(screenG); delete[] screenC; delete VisualTur; }
e20b895e9e5bcae09dfd3e583dff74446f91decf.cu
#include "visualTur.hpp" #include "hdf5.h" #include <iostream> #include <fstream> #include <sstream> #include <sys/time.h> #define WHERESTR "Error[file "<<__FILE__<<", line "<<__LINE__<<"]: " int getnLevelFile(char * file_name, char * dataset_name) { hid_t file_id; hid_t dataset_id; hid_t spaceid; int ndim; hsize_t dims[3]; if ((file_id = H5Fopen(file_name, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { std::cerr<< WHERESTR<<" unable to open the requested file"<<std::endl; exit(0); } if ((dataset_id = H5Dopen1(file_id, dataset_name)) < 0 ) { std::cerr<<WHERESTR<<" unable to open the requested data set"<<std::endl; exit(0); } if ((spaceid = H5Dget_space(dataset_id)) < 0) { std::cerr<<WHERESTR<<" unable to open the requested data space"<<std::endl; exit(0); } if ((ndim = H5Sget_simple_extent_dims (spaceid, dims, NULL)) < 0) { std::cerr<<WHERESTR<<" handling file"<<std::endl; exit(0); } herr_t status; if ((status = H5Dclose(dataset_id)) < 0) { std::cerr<<WHERESTR<<" unable to close the data set"<<std::endl; exit(0); } if ((status = H5Fclose(file_id)) < 0); { std::cerr<<WHERESTR<<" unable to close the file"<<std::endl; } int dimension; if (dims[0]>dims[1] && dims[0]>dims[2]) dimension = dims[0]; else if (dims[1]>dims[2]) dimension = dims[1]; else dimension = dims[2]; /* Calcular dimension del árbol*/ float aux = logf(dimension)/logf(2.0); float aux2 = aux - floorf(aux); int nLevels = aux2>0.0 ? 
aux+1 : aux; return nLevels; } int main(int argc, char ** argv) { if (argc < 4) { std::cerr<<"Error, testVisualTur hdf5_file dataset_name octree_file [device]"<<std::endl; return 0; } int device = 0; if (argc > 4) { device = atoi(argv[4]); } int W = 1024; int H = 1024; int x,y,z; std::cout<<"Display Resolution:"<<std::endl; std::cout<<"Width: "; std::cin >> W; std::cout<<"Height: "; std::cin >> H; std::cout<<"Camera position (X,Y,Z):"<<std::endl; std::cout<<"X: "; std::cin >> x; std::cout<<"Y: "; std::cin >> y; std::cout<<"Z: "; std::cin >> z; int nLevel = getnLevelFile(argv[1], argv[2]); cudaSetDevice(device); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); //get the amount of free memory on the graphics card size_t free; size_t total; cudaMemGetInfo(&free, &total); visualTurParams_t params; params.W = W; params.H = H; params.fov_H = 35.0f; params.fov_W = 35.0f; params.distance = 50.0f; params.numRayPx = 1; params.maxElementsCache = 3*(free / (38*38*38*4)) /4; params.maxElementsCache_CPU = 5000; params.dimCubeCache = make_int3(32,32,32); params.cubeInc = 2; params.levelCubes = nLevel - 5; params.octreeLevel = (nLevel - 5) + 3; params.hdf5File = argv[1]; params.dataset_name = argv[2]; params.octreeFile = argv[3]; float * screenC = new float[H*W*4]; float * screenG = 0; std::cerr<<"Allocating memory octree CUDA screen: "<< cudaGetErrorString(cudaMalloc((void**)&screenG, sizeof(float)*H*W*4))<<std::endl; visualTur * VisualTur = new visualTur(params); VisualTur->camera_Move(make_float3(x,y,z)); struct timeval st, end; double totalT = 0.0; for(int m=0; m<1000; m++) { gettimeofday(&st, NULL); VisualTur->updateVisibleCubes(screenG); std::cerr<<"Retrieve screen from GPU: "<< cudaGetErrorString(cudaMemcpy((void*) screenC, (const void*) screenG, sizeof(float)*W*H*4, cudaMemcpyDeviceToHost))<<std::endl; VisualTur->camera_StrafeRight(0.5f); gettimeofday(&end, NULL); double delta = ((end.tv_sec - st.tv_sec) * 1000000u + end.tv_usec - st.tv_usec) / 1.e6; std::cout << "Time 
elapsed iteration "<<m<<": " << delta << " sec"<< std::endl; totalT+=delta; } std::cout << "Time elapsed: " << totalT<< " sec to 1000 iterations"<< std::endl; std::cout << "Average time elapsed: " << totalT/1000.0f<< " sec"<< std::endl; cudaFree(screenG); delete[] screenC; delete VisualTur; }
8c74d89d917c7170ab0c0c2c47b80f048499e29a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(double * input, int N, int M, double * __var_4__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_0__; __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_0__ <= (M-2)){ int __iter_1__; __iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_1__ <= (N-2)){ double __temp_0__; __temp_0__ = (7 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]); double __temp_1__; __temp_1__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]); double __temp_2__; __temp_2__ = (__temp_0__ + __temp_1__); double __temp_3__; __temp_3__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]); double __temp_4__; __temp_4__ = (__temp_2__ + __temp_3__); double __temp_5__; __temp_5__ = (12 * 
input[__iter_0__+(-1)+(M-0)*(__iter_1__)]); double __temp_6__; __temp_6__ = (__temp_4__ + __temp_5__); double __temp_7__; __temp_7__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]); double __temp_8__; __temp_8__ = (__temp_6__ + __temp_7__); double __temp_9__; __temp_9__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]); double __temp_10__; __temp_10__ = (__temp_8__ + __temp_9__); double __temp_11__; __temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]); double __temp_12__; __temp_12__ = (__temp_10__ + __temp_11__); double __temp_13__; __temp_13__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(1))]); double __temp_14__; __temp_14__ = (__temp_12__ + __temp_13__); double __temp_15__; __temp_15__ = (7 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]); double __temp_16__; __temp_16__ = (__temp_14__ + __temp_15__); double __temp_17__; __temp_17__ = (__temp_16__ / 118); __var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_17__; } } } __global__ void __kernel___forma_kernel__1__(double * __var_4__, int N, int M, double * __var_3__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_2__; __iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_2__ <= (M-2)){ int __iter_3__; __iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_3__ <= (N-2)){ double __temp_18__; __temp_18__ = (7 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]); double __temp_19__; __temp_19__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]); double __temp_20__; __temp_20__ = (__temp_18__ + __temp_19__); double __temp_21__; __temp_21__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]); double __temp_22__; __temp_22__ = (__temp_20__ + __temp_21__); double __temp_23__; __temp_23__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]); double __temp_24__; __temp_24__ = (__temp_22__ + __temp_23__); double __temp_25__; __temp_25__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]); 
double __temp_26__; __temp_26__ = (__temp_24__ + __temp_25__); double __temp_27__; __temp_27__ = (12 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]); double __temp_28__; __temp_28__ = (__temp_26__ + __temp_27__); double __temp_29__; __temp_29__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]); double __temp_30__; __temp_30__ = (__temp_28__ + __temp_29__); double __temp_31__; __temp_31__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]); double __temp_32__; __temp_32__ = (__temp_30__ + __temp_31__); double __temp_33__; __temp_33__ = (7 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]); double __temp_34__; __temp_34__ = (__temp_32__ + __temp_33__); double __temp_35__; __temp_35__ = (__temp_34__ / 118); __var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_35__; } } } __global__ void __kernel___forma_kernel__2__(double * __var_3__, int N, int M, double * __var_2__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_4__; __iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_4__ <= (M-2)){ int __iter_5__; __iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_5__ <= (N-2)){ double __temp_36__; __temp_36__ = (7 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]); double __temp_37__; __temp_37__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]); double __temp_38__; __temp_38__ = (__temp_36__ + __temp_37__); double __temp_39__; __temp_39__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]); double __temp_40__; __temp_40__ = (__temp_38__ + __temp_39__); double __temp_41__; __temp_41__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]); double __temp_42__; __temp_42__ = (__temp_40__ + __temp_41__); double __temp_43__; __temp_43__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]); double __temp_44__; __temp_44__ = (__temp_42__ + __temp_43__); double __temp_45__; __temp_45__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]); 
double __temp_46__; __temp_46__ = (__temp_44__ + __temp_45__); double __temp_47__; __temp_47__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]); double __temp_48__; __temp_48__ = (__temp_46__ + __temp_47__); double __temp_49__; __temp_49__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]); double __temp_50__; __temp_50__ = (__temp_48__ + __temp_49__); double __temp_51__; __temp_51__ = (7 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]); double __temp_52__; __temp_52__ = (__temp_50__ + __temp_51__); double __temp_53__; __temp_53__ = (__temp_52__ / 118); __var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_53__; } } } __global__ void __kernel___forma_kernel__3__(double * __var_2__, int N, int M, double * __var_1__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_6__; __iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_6__ <= (M-2)){ int __iter_7__; __iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_7__ <= (N-2)){ double __temp_54__; __temp_54__ = (7 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]); double __temp_55__; __temp_55__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]); double __temp_56__; __temp_56__ = (__temp_54__ + __temp_55__); double __temp_57__; __temp_57__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]); double __temp_58__; __temp_58__ = (__temp_56__ + __temp_57__); double __temp_59__; __temp_59__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]); double __temp_60__; __temp_60__ = (__temp_58__ + __temp_59__); double __temp_61__; __temp_61__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]); double __temp_62__; __temp_62__ = (__temp_60__ + __temp_61__); double __temp_63__; __temp_63__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]); double __temp_64__; __temp_64__ = (__temp_62__ + __temp_63__); double __temp_65__; __temp_65__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]); 
double __temp_66__; __temp_66__ = (__temp_64__ + __temp_65__); double __temp_67__; __temp_67__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]); double __temp_68__; __temp_68__ = (__temp_66__ + __temp_67__); double __temp_69__; __temp_69__ = (7 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]); double __temp_70__; __temp_70__ = (__temp_68__ + __temp_69__); double __temp_71__; __temp_71__ = (__temp_70__ / 118); __var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_71__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){ /* Host allocation Begin */ double * input; hipMalloc(&input,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(double)*((N-0)*(M-0)), memcpy_kind_h_input); } double * __var_1__; hipMalloc(&__var_1__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); double * __var_2__; hipMalloc(&__var_2__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); double * __var_3__; hipMalloc(&__var_3__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_3__\n"); double * __var_4__; hipMalloc(&__var_4__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! 
: __var_4__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1; int __max_occupancy_blocksize___kernel___forma_kernel__0__; int _max_occupancy_gridsize___kernel___forma_kernel__0__; hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0); int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2)); __max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __var_4__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_4__, N, M, __var_3__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_3__, N, M, __var_2__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N-0)*(M-0)), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__var_2__); hipFree(__var_3__); hipFree(__var_4__); } /*Host Free End*/
8c74d89d917c7170ab0c0c2c47b80f048499e29a.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(double * input, int N, int M, double * __var_4__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_0__; __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_0__ <= (M-2)){ int __iter_1__; __iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_1__ <= (N-2)){ double __temp_0__; __temp_0__ = (7 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]); double __temp_1__; __temp_1__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]); double __temp_2__; __temp_2__ = (__temp_0__ + __temp_1__); double __temp_3__; __temp_3__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]); double __temp_4__; __temp_4__ = (__temp_2__ + __temp_3__); double __temp_5__; __temp_5__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]); double __temp_6__; __temp_6__ = (__temp_4__ + __temp_5__); double __temp_7__; __temp_7__ = (15 * 
input[__iter_0__+(M-0)*(__iter_1__)]); double __temp_8__; __temp_8__ = (__temp_6__ + __temp_7__); double __temp_9__; __temp_9__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]); double __temp_10__; __temp_10__ = (__temp_8__ + __temp_9__); double __temp_11__; __temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]); double __temp_12__; __temp_12__ = (__temp_10__ + __temp_11__); double __temp_13__; __temp_13__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(1))]); double __temp_14__; __temp_14__ = (__temp_12__ + __temp_13__); double __temp_15__; __temp_15__ = (7 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]); double __temp_16__; __temp_16__ = (__temp_14__ + __temp_15__); double __temp_17__; __temp_17__ = (__temp_16__ / 118); __var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_17__; } } } __global__ void __kernel___forma_kernel__1__(double * __var_4__, int N, int M, double * __var_3__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_2__; __iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_2__ <= (M-2)){ int __iter_3__; __iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_3__ <= (N-2)){ double __temp_18__; __temp_18__ = (7 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]); double __temp_19__; __temp_19__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]); double __temp_20__; __temp_20__ = (__temp_18__ + __temp_19__); double __temp_21__; __temp_21__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]); double __temp_22__; __temp_22__ = (__temp_20__ + __temp_21__); double __temp_23__; __temp_23__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]); double __temp_24__; __temp_24__ = (__temp_22__ + __temp_23__); double __temp_25__; __temp_25__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]); double __temp_26__; __temp_26__ = (__temp_24__ + __temp_25__); double __temp_27__; __temp_27__ = (12 * 
__var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]); double __temp_28__; __temp_28__ = (__temp_26__ + __temp_27__); double __temp_29__; __temp_29__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]); double __temp_30__; __temp_30__ = (__temp_28__ + __temp_29__); double __temp_31__; __temp_31__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]); double __temp_32__; __temp_32__ = (__temp_30__ + __temp_31__); double __temp_33__; __temp_33__ = (7 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]); double __temp_34__; __temp_34__ = (__temp_32__ + __temp_33__); double __temp_35__; __temp_35__ = (__temp_34__ / 118); __var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_35__; } } } __global__ void __kernel___forma_kernel__2__(double * __var_3__, int N, int M, double * __var_2__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_4__; __iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_4__ <= (M-2)){ int __iter_5__; __iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_5__ <= (N-2)){ double __temp_36__; __temp_36__ = (7 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]); double __temp_37__; __temp_37__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]); double __temp_38__; __temp_38__ = (__temp_36__ + __temp_37__); double __temp_39__; __temp_39__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]); double __temp_40__; __temp_40__ = (__temp_38__ + __temp_39__); double __temp_41__; __temp_41__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]); double __temp_42__; __temp_42__ = (__temp_40__ + __temp_41__); double __temp_43__; __temp_43__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]); double __temp_44__; __temp_44__ = (__temp_42__ + __temp_43__); double __temp_45__; __temp_45__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]); double __temp_46__; __temp_46__ = (__temp_44__ + __temp_45__); double __temp_47__; __temp_47__ = (9 * 
__var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]); double __temp_48__; __temp_48__ = (__temp_46__ + __temp_47__); double __temp_49__; __temp_49__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]); double __temp_50__; __temp_50__ = (__temp_48__ + __temp_49__); double __temp_51__; __temp_51__ = (7 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]); double __temp_52__; __temp_52__ = (__temp_50__ + __temp_51__); double __temp_53__; __temp_53__ = (__temp_52__ / 118); __var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_53__; } } } __global__ void __kernel___forma_kernel__3__(double * __var_2__, int N, int M, double * __var_1__){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_6__; __iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_6__ <= (M-2)){ int __iter_7__; __iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_7__ <= (N-2)){ double __temp_54__; __temp_54__ = (7 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]); double __temp_55__; __temp_55__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]); double __temp_56__; __temp_56__ = (__temp_54__ + __temp_55__); double __temp_57__; __temp_57__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]); double __temp_58__; __temp_58__ = (__temp_56__ + __temp_57__); double __temp_59__; __temp_59__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]); double __temp_60__; __temp_60__ = (__temp_58__ + __temp_59__); double __temp_61__; __temp_61__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]); double __temp_62__; __temp_62__ = (__temp_60__ + __temp_61__); double __temp_63__; __temp_63__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]); double __temp_64__; __temp_64__ = (__temp_62__ + __temp_63__); double __temp_65__; __temp_65__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]); double __temp_66__; __temp_66__ = (__temp_64__ + __temp_65__); double __temp_67__; __temp_67__ = (5 * 
__var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]); double __temp_68__; __temp_68__ = (__temp_66__ + __temp_67__); double __temp_69__; __temp_69__ = (7 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]); double __temp_70__; __temp_70__ = (__temp_68__ + __temp_69__); double __temp_71__; __temp_71__ = (__temp_70__ / 118); __var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_71__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){ /* Host allocation Begin */ double * input; cudaMalloc(&input,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(double)*((N-0)*(M-0)), memcpy_kind_h_input); } double * __var_1__; cudaMalloc(&__var_1__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); double * __var_2__; cudaMalloc(&__var_2__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); double * __var_3__; cudaMalloc(&__var_3__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_3__\n"); double * __var_4__; cudaMalloc(&__var_4__,sizeof(double)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! 
: __var_4__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1; int __max_occupancy_blocksize___kernel___forma_kernel__0__; int _max_occupancy_gridsize___kernel___forma_kernel__0__; cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0); int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2)); __max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __var_4__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_4__, N, M, __var_3__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_3__, N, M, __var_2__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N-0)*(M-0)), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__var_2__); cudaFree(__var_3__); cudaFree(__var_4__); } /*Host Free End*/
551cb274ecc84bdce4b98e93f4769a6582a819ee.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> template <typename scalar_t> __device__ __forceinline__ scalar_t act(scalar_t z) { return 0.5 * z * (tanh(z) + 1); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_act(scalar_t z) { return 0.5 * tanh(z) + 0.5 * z * pow((1/cosh(z)),2) + 0.5; } template <typename scalar_t> __global__ void cust_act_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output) { const int n = blockIdx.y; //batch index const int c = blockIdx.x * blockDim.x + threadIdx.x; // column index if (c < input.size(2)){ output[n][c] = act(input[n][c]); } } torch::Tensor cust_act_cuda_forward(torch::Tensor input) { auto output = torch::zeros_like(input); const auto batch_size = input.size(0); const auto state_size = input.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(input.type(), "cust_act_forward_cuda", ([&] { hipLaunchKernelGGL(( cust_act_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, input.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), output.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>() ); })); return output; } template <typename scalar_t> __global__ void cust_act_cuda_backward_kernel( const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_out, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_inp) { const int n = blockIdx.y; //batch index const int c = blockIdx.x * blockDim.x + threadIdx.x; // column index if (c < grad_inp.size(2)){ grad_inp[n][c] = d_act(input[n][c]) * grad_out[n][c]; } } torch::Tensor cust_act_cuda_backward(torch::Tensor input, 
torch::Tensor grad_out) { auto grad_inp = torch::zeros_like(grad_out); const auto batch_size = grad_inp.size(0); const auto state_size = grad_inp.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(input.type(), "cust_act_backward_cuda", ([&] { hipLaunchKernelGGL(( cust_act_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, input.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), grad_out.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), grad_inp.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>()); })); return grad_inp; }
551cb274ecc84bdce4b98e93f4769a6582a819ee.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> template <typename scalar_t> __device__ __forceinline__ scalar_t act(scalar_t z) { return 0.5 * z * (tanh(z) + 1); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_act(scalar_t z) { return 0.5 * tanh(z) + 0.5 * z * pow((1/cosh(z)),2) + 0.5; } template <typename scalar_t> __global__ void cust_act_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output) { const int n = blockIdx.y; //batch index const int c = blockIdx.x * blockDim.x + threadIdx.x; // column index if (c < input.size(2)){ output[n][c] = act(input[n][c]); } } torch::Tensor cust_act_cuda_forward(torch::Tensor input) { auto output = torch::zeros_like(input); const auto batch_size = input.size(0); const auto state_size = input.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(input.type(), "cust_act_forward_cuda", ([&] { cust_act_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( input.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), output.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>() ); })); return output; } template <typename scalar_t> __global__ void cust_act_cuda_backward_kernel( const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_out, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_inp) { const int n = blockIdx.y; //batch index const int c = blockIdx.x * blockDim.x + threadIdx.x; // column index if (c < grad_inp.size(2)){ grad_inp[n][c] = d_act(input[n][c]) * grad_out[n][c]; } } torch::Tensor cust_act_cuda_backward(torch::Tensor input, torch::Tensor grad_out) { auto grad_inp = torch::zeros_like(grad_out); const auto batch_size = grad_inp.size(0); 
const auto state_size = grad_inp.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(input.type(), "cust_act_backward_cuda", ([&] { cust_act_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( input.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), grad_out.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), grad_inp.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>()); })); return grad_inp; }
ff3b6add908b5db54f1391f1d12077c7117b45b3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include "basics/tensor.cu" #include "basics/session.hpp" #include "layers/data.cu" #include "layers/softmax.cu" #include "layers/cross_entropy_loss.cu" #include "layers/pooling.cu" #include "layers/conv2d.cu" #include "layers/relu.cu" #include "layers/fc.cu" #include "utils/bitmap_image.hpp" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "utils/helper_cuda.h" #include "utils/utils.cu" void test_lenet_gpu() { hipError_t cudaStatus = hipSetDevice(0); checkCudaErrors(cudaStatus); show_mem(cudaStatus); startTimer(); Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = 64; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "tmp/test/img_list.txt"); // vector<size_t*> data_tops_dims; size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims1)); Conv2D<float> conv1(5,5,3,32,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Tensor<float> * conv1_top = Tensor<float>::CreateTensorGPU(conv1_top_dims); assert(conv1_top_dims[0] == batch_size); assert(conv1_top_dims[1] == 28); assert(conv1_top_dims[2] == 28); assert(conv1_top_dims[3] == 32); Pooling<float> pool1(2, MAX, 2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Tensor<float> * pool1_top = Tensor<float>::CreateTensorGPU(pool1_top_dims); assert(pool1_top_dims[0] == batch_size); assert(pool1_top_dims[1] == 14); assert(pool1_top_dims[2] == 14); assert(pool1_top_dims[3] == 32); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); Tensor<float> * 
relu1_top = Tensor<float>::CreateTensorGPU(relu1_top_dims); assert(relu1_top_dims[0] == batch_size); assert(relu1_top_dims[1] == 14); assert(relu1_top_dims[2] == 14); assert(relu1_top_dims[3] == 32); Conv2D<float> conv2(5,5,32,64,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({relu1_top_dims}, {conv2_top_dims}); printf("relu1 top dims: %d %d %d %d \n", (int)relu1_top_dims[0], (int)relu1_top_dims[1], (int)relu1_top_dims[2], (int)relu1_top_dims[3]); printf("conv2 top dims: %d %d %d %d \n", (int)conv2_top_dims[0], (int)conv2_top_dims[1], (int)conv2_top_dims[2], (int)conv2_top_dims[3]); Tensor<float> * conv2_top = Tensor<float>::CreateTensorGPU(conv2_top_dims); assert(conv2_top_dims[0] == batch_size); assert(conv2_top_dims[1] == 14); assert(conv2_top_dims[2] == 14); assert(conv2_top_dims[3] == 64); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Tensor<float> * pool2_top = Tensor<float>::CreateTensorGPU(pool2_top_dims); assert(pool2_top_dims[0] == batch_size); assert(pool2_top_dims[1] == 7); assert(pool2_top_dims[2] == 7); assert(pool2_top_dims[3] == 64); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); Tensor<float> * relu2_top = Tensor<float>::CreateTensorGPU(relu2_top_dims); FC<float> fc3(7*7*64,1024); size_t to_fc3_dims[4]; to_fc3_dims[0] = relu2_top_dims[0]; to_fc3_dims[1] = 1; to_fc3_dims[2] = 1; to_fc3_dims[3] = relu2_top_dims[1]*relu2_top_dims[2]*relu2_top_dims[3]; size_t fc3_top_dims[4]; fc3.GetTopsDims({to_fc3_dims}, {fc3_top_dims}); printf("relu2 top dims: %d %d %d %d \n", relu2_top_dims[0], relu2_top_dims[1], relu2_top_dims[2], relu2_top_dims[3]); printf("fc3 top dims: %d %d %d %d \n", fc3_top_dims[0], fc3_top_dims[1], fc3_top_dims[2], fc3_top_dims[3]); Tensor<float> * fc3_top = Tensor<float>::CreateTensorGPU(fc3_top_dims); assert(fc3_top_dims[0] == batch_size); assert(fc3_top_dims[1] == 1); 
assert(fc3_top_dims[2] == 1); assert(fc3_top_dims[3] == 1024); Relu<float> relu3; size_t relu3_top_dims[4]; relu3.GetTopsDims({fc3_top_dims}, {relu3_top_dims}); Tensor<float> * relu3_top = Tensor<float>::CreateTensorGPU(relu3_top_dims); FC<float> fc4(1024, 10); size_t fc4_top_dims[4]; fc4.GetTopsDims({relu3_top_dims}, {fc4_top_dims}); Tensor<float> * fc4_top = Tensor<float>::CreateTensorGPU(fc4_top_dims); assert(fc4_top_dims[0] == batch_size); assert(fc4_top_dims[1] == 1); assert(fc4_top_dims[2] == 1); assert(fc4_top_dims[3] == 10); Softmax<float> softmax; size_t sm_top_dims[4]; softmax.GetTopsDims({fc4_top_dims}, {sm_top_dims}); Tensor<float> * sm_top = Tensor<float>::CreateTensorGPU(sm_top_dims); CrossEntropyLoss<float> cel; size_t cel_top_dims[4]; cel.GetTopsDims({sm_top_dims, data_tops_dims1}, {cel_top_dims}); Tensor<float> * cel_top = Tensor<float>::CreateTensorGPU(cel_top_dims); printf("network finished setup: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = hipGetLastError(); checkCudaErrors(cudaStatus); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); printf("data forward: %3.1f ms \n", stopTimer()); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); printf("conv1 forward: %3.1f ms \n", stopTimer()); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); printf("pool1 forward: %3.1f ms \n", stopTimer()); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); printf("relu1 forward: %3.1f ms \n", stopTimer()); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); printf("conv2 forward: %3.1f ms \n", stopTimer()); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); printf("pool2 forward: %3.1f ms \n", stopTimer()); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); printf("relu2 forward: %3.1f ms \n", stopTimer()); startTimer(); // flatten the tensor Tensor<float>::ReshapeTensorGPU(relu2_top, to_fc3_dims); fc3.Forward({relu2_top}, {fc3_top}); printf("fc3 forward: %3.1f ms \n", stopTimer()); startTimer(); 
relu3.Forward({fc3_top}, {relu3_top}); printf("relu3 forward: %3.1f ms \n", stopTimer()); startTimer(); fc4.Forward({relu3_top}, {fc4_top}); printf("fc4 forward: %3.1f ms \n", stopTimer()); startTimer(); softmax.Forward({fc4_top}, {sm_top}); printf("softmax forward: %3.1f ms \n", stopTimer()); startTimer(); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("cel forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); conv1.Forward({data_tops[0]}, {conv1_top}); pool1.Forward({conv1_top}, {pool1_top}); relu1.Forward({pool1_top}, {relu1_top}); conv2.Forward({relu1_top}, {conv2_top}); pool2.Forward({conv2_top}, {pool2_top}); relu2.Forward({pool2_top}, {relu2_top}); fc3.Forward({relu2_top}, {fc3_top}); relu3.Forward({fc3_top}, {relu3_top}); fc4.Forward({relu3_top}, {fc4_top}); softmax.Forward({fc4_top}, {sm_top}); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("finished forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); printf("%d %d %d %d \n", fc4_top_dims[0], fc4_top_dims[1], fc4_top_dims[2], fc4_top_dims[3]); printf("%d %d %d %d \n", data_tops_dims1[0], data_tops_dims1[1], data_tops_dims1[2], data_tops_dims1[3]); printf("%d %d %d %d \n", cel_top_dims[0], cel_top_dims[1], cel_top_dims[2], cel_top_dims[3]); printf("%d %d %d %d \n", sm_top_dims[0], sm_top_dims[1], sm_top_dims[2], sm_top_dims[3]); cudaStatus = hipGetLastError(); checkCudaErrors(cudaStatus); show_mem(cudaStatus); } int main() { test_lenet_gpu(); }
ff3b6add908b5db54f1391f1d12077c7117b45b3.cu
#include <stdio.h> #include <assert.h> #include "basics/tensor.cu" #include "basics/session.hpp" #include "layers/data.cu" #include "layers/softmax.cu" #include "layers/cross_entropy_loss.cu" #include "layers/pooling.cu" #include "layers/conv2d.cu" #include "layers/relu.cu" #include "layers/fc.cu" #include "utils/bitmap_image.hpp" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "utils/helper_cuda.h" #include "utils/utils.cu" void test_lenet_gpu() { cudaError_t cudaStatus = cudaSetDevice(0); checkCudaErrors(cudaStatus); show_mem(cudaStatus); startTimer(); Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = 64; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "tmp/test/img_list.txt"); // vector<size_t*> data_tops_dims; size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims1)); Conv2D<float> conv1(5,5,3,32,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Tensor<float> * conv1_top = Tensor<float>::CreateTensorGPU(conv1_top_dims); assert(conv1_top_dims[0] == batch_size); assert(conv1_top_dims[1] == 28); assert(conv1_top_dims[2] == 28); assert(conv1_top_dims[3] == 32); Pooling<float> pool1(2, MAX, 2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Tensor<float> * pool1_top = Tensor<float>::CreateTensorGPU(pool1_top_dims); assert(pool1_top_dims[0] == batch_size); assert(pool1_top_dims[1] == 14); assert(pool1_top_dims[2] == 14); assert(pool1_top_dims[3] == 32); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); Tensor<float> * relu1_top = Tensor<float>::CreateTensorGPU(relu1_top_dims); 
assert(relu1_top_dims[0] == batch_size); assert(relu1_top_dims[1] == 14); assert(relu1_top_dims[2] == 14); assert(relu1_top_dims[3] == 32); Conv2D<float> conv2(5,5,32,64,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({relu1_top_dims}, {conv2_top_dims}); printf("relu1 top dims: %d %d %d %d \n", (int)relu1_top_dims[0], (int)relu1_top_dims[1], (int)relu1_top_dims[2], (int)relu1_top_dims[3]); printf("conv2 top dims: %d %d %d %d \n", (int)conv2_top_dims[0], (int)conv2_top_dims[1], (int)conv2_top_dims[2], (int)conv2_top_dims[3]); Tensor<float> * conv2_top = Tensor<float>::CreateTensorGPU(conv2_top_dims); assert(conv2_top_dims[0] == batch_size); assert(conv2_top_dims[1] == 14); assert(conv2_top_dims[2] == 14); assert(conv2_top_dims[3] == 64); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Tensor<float> * pool2_top = Tensor<float>::CreateTensorGPU(pool2_top_dims); assert(pool2_top_dims[0] == batch_size); assert(pool2_top_dims[1] == 7); assert(pool2_top_dims[2] == 7); assert(pool2_top_dims[3] == 64); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); Tensor<float> * relu2_top = Tensor<float>::CreateTensorGPU(relu2_top_dims); FC<float> fc3(7*7*64,1024); size_t to_fc3_dims[4]; to_fc3_dims[0] = relu2_top_dims[0]; to_fc3_dims[1] = 1; to_fc3_dims[2] = 1; to_fc3_dims[3] = relu2_top_dims[1]*relu2_top_dims[2]*relu2_top_dims[3]; size_t fc3_top_dims[4]; fc3.GetTopsDims({to_fc3_dims}, {fc3_top_dims}); printf("relu2 top dims: %d %d %d %d \n", relu2_top_dims[0], relu2_top_dims[1], relu2_top_dims[2], relu2_top_dims[3]); printf("fc3 top dims: %d %d %d %d \n", fc3_top_dims[0], fc3_top_dims[1], fc3_top_dims[2], fc3_top_dims[3]); Tensor<float> * fc3_top = Tensor<float>::CreateTensorGPU(fc3_top_dims); assert(fc3_top_dims[0] == batch_size); assert(fc3_top_dims[1] == 1); assert(fc3_top_dims[2] == 1); assert(fc3_top_dims[3] == 
1024); Relu<float> relu3; size_t relu3_top_dims[4]; relu3.GetTopsDims({fc3_top_dims}, {relu3_top_dims}); Tensor<float> * relu3_top = Tensor<float>::CreateTensorGPU(relu3_top_dims); FC<float> fc4(1024, 10); size_t fc4_top_dims[4]; fc4.GetTopsDims({relu3_top_dims}, {fc4_top_dims}); Tensor<float> * fc4_top = Tensor<float>::CreateTensorGPU(fc4_top_dims); assert(fc4_top_dims[0] == batch_size); assert(fc4_top_dims[1] == 1); assert(fc4_top_dims[2] == 1); assert(fc4_top_dims[3] == 10); Softmax<float> softmax; size_t sm_top_dims[4]; softmax.GetTopsDims({fc4_top_dims}, {sm_top_dims}); Tensor<float> * sm_top = Tensor<float>::CreateTensorGPU(sm_top_dims); CrossEntropyLoss<float> cel; size_t cel_top_dims[4]; cel.GetTopsDims({sm_top_dims, data_tops_dims1}, {cel_top_dims}); Tensor<float> * cel_top = Tensor<float>::CreateTensorGPU(cel_top_dims); printf("network finished setup: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); printf("data forward: %3.1f ms \n", stopTimer()); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); printf("conv1 forward: %3.1f ms \n", stopTimer()); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); printf("pool1 forward: %3.1f ms \n", stopTimer()); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); printf("relu1 forward: %3.1f ms \n", stopTimer()); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); printf("conv2 forward: %3.1f ms \n", stopTimer()); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); printf("pool2 forward: %3.1f ms \n", stopTimer()); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); printf("relu2 forward: %3.1f ms \n", stopTimer()); startTimer(); // flatten the tensor Tensor<float>::ReshapeTensorGPU(relu2_top, to_fc3_dims); fc3.Forward({relu2_top}, {fc3_top}); printf("fc3 forward: %3.1f ms \n", stopTimer()); startTimer(); relu3.Forward({fc3_top}, {relu3_top}); printf("relu3 
forward: %3.1f ms \n", stopTimer()); startTimer(); fc4.Forward({relu3_top}, {fc4_top}); printf("fc4 forward: %3.1f ms \n", stopTimer()); startTimer(); softmax.Forward({fc4_top}, {sm_top}); printf("softmax forward: %3.1f ms \n", stopTimer()); startTimer(); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("cel forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); conv1.Forward({data_tops[0]}, {conv1_top}); pool1.Forward({conv1_top}, {pool1_top}); relu1.Forward({pool1_top}, {relu1_top}); conv2.Forward({relu1_top}, {conv2_top}); pool2.Forward({conv2_top}, {pool2_top}); relu2.Forward({pool2_top}, {relu2_top}); fc3.Forward({relu2_top}, {fc3_top}); relu3.Forward({fc3_top}, {relu3_top}); fc4.Forward({relu3_top}, {fc4_top}); softmax.Forward({fc4_top}, {sm_top}); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("finished forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); printf("%d %d %d %d \n", fc4_top_dims[0], fc4_top_dims[1], fc4_top_dims[2], fc4_top_dims[3]); printf("%d %d %d %d \n", data_tops_dims1[0], data_tops_dims1[1], data_tops_dims1[2], data_tops_dims1[3]); printf("%d %d %d %d \n", cel_top_dims[0], cel_top_dims[1], cel_top_dims[2], cel_top_dims[3]); printf("%d %d %d %d \n", sm_top_dims[0], sm_top_dims[1], sm_top_dims[2], sm_top_dims[3]); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus); show_mem(cudaStatus); } int main() { test_lenet_gpu(); }
795966ab355af722a7efb46c81e959e9c3dabba6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/boundingbox_decode_impl.cuh" #include "include/hip/hip_fp16.h" template <typename T> __global__ void BoundingBoxDecodeKernel(const size_t size, const T *rois, const T *deltas, T *bboxes, const float m1, const float m2, const float m3, const float m4, const float s1, const float s2, const float s3, const float s4, const int max_height, const int max_width, const float ratio_clip) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t left_x = i * 4; const size_t left_y = i * 4 + 1; const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; T dx = deltas[left_x] * s1 + m1; T dy = deltas[left_y] * s2 + m2; T dw = deltas[right_x] * s3 + m3; T dh = deltas[right_y] * s4 + m4; T max_ratio = abs(log(ratio_clip)); dw = dw > max_ratio ? max_ratio : (dw < (-max_ratio) ? (-max_ratio) : dw); dh = dh > max_ratio ? max_ratio : (dh < (-max_ratio) ? 
(-max_ratio) : dh); T px = (rois[left_x] + rois[right_x]) * 0.5f; T py = (rois[left_y] + rois[right_y]) * 0.5f; T pw = rois[right_x] - rois[left_x] + 1.0f; T ph = rois[right_y] - rois[left_y] + 1.0f; T gx = px + pw * dx; T gy = py + ph * dy; T gw = pw * exp(dw); T gh = ph * exp(dh); T x1 = gx - gw * 0.5f + 0.5f; T y1 = gy - gh * 0.5f + 0.5f; T x2 = gx + gw * 0.5f - 0.5f; T y2 = gy + gh * 0.5f - 0.5f; x1 = x1 > max_width ? max_width : (x1 < 0 ? 0 : x1); y1 = y1 > max_height ? max_height : (y1 < 0 ? 0 : y1); x2 = x2 > max_width ? max_width : (x2 < 0 ? 0 : x2); y2 = y2 > max_height ? max_height : (y2 < 0 ? 0 : y2); bboxes[left_x] = x1; bboxes[left_y] = y1; bboxes[right_x] = x2; bboxes[right_y] = y2; } } template <> __global__ void BoundingBoxDecodeKernel(const size_t size, const half *rois, const half *deltas, half *bboxes, const float m1, const float m2, const float m3, const float m4, const float s1, const float s2, const float s3, const float s4, const int max_height, const int max_width, const float ratio_clip) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t left_x = i * 4; const size_t left_y = i * 4 + 1; const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; float dx = static_cast<float>(deltas[left_x]) * s1 + m1; float dy = static_cast<float>(deltas[left_y]) * s2 + m2; float dw = static_cast<float>(deltas[right_x]) * s3 + m3; float dh = static_cast<float>(deltas[right_y]) * s4 + m4; float max_ratio = abs(log(ratio_clip)); dw = dw > max_ratio ? max_ratio : (dw < (-max_ratio) ? (-max_ratio) : dw); dh = dh > max_ratio ? max_ratio : (dh < (-max_ratio) ? 
(-max_ratio) : dh); float px = static_cast<float>(rois[left_x] + rois[right_x]) * 0.5f; float py = static_cast<float>(rois[left_y] + rois[right_y]) * 0.5f; float pw = static_cast<float>(rois[right_x] - rois[left_x]) + 1.0f; float ph = static_cast<float>(rois[right_y] - rois[left_y]) + 1.0f; float gx = px + pw * dx; float gy = py + ph * dy; float gw = pw * exp(dw); float gh = ph * exp(dh); float x1 = gx - gw * 0.5f + 0.5f; float y1 = gy - gh * 0.5f + 0.5f; float x2 = gx + gw * 0.5f - 0.5f; float y2 = gy + gh * 0.5f - 0.5f; x1 = x1 > max_width ? max_width : (x1 < 0 ? 0 : x1); y1 = y1 > max_height ? max_height : (y1 < 0 ? 0 : y1); x2 = x2 > max_width ? max_width : (x2 < 0 ? 0 : x2); y2 = y2 > max_height ? max_height : (y2 < 0 ? 0 : y2); bboxes[left_x] = x1; bboxes[left_y] = y1; bboxes[right_x] = x2; bboxes[right_y] = y2; } } template <typename T> void BoundingBoxDecode(const size_t size, const T *rois, const T *deltas, T *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( BoundingBoxDecodeKernel), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, rois, deltas, bboxes, m1, m2, m3, m4, s1, s2, s3, s4, max_height, max_width, ratio_clip); } template <> void BoundingBoxDecode(const size_t size, const half *rois, const half *deltas, half *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( BoundingBoxDecodeKernel<half>), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, rois, deltas, bboxes, m1, m2, m3, m4, s1, s2, s3, s4, 
max_height, max_width, ratio_clip); } template CUDA_LIB_EXPORT void BoundingBoxDecode<float>(const size_t size, const float *rois, const float *deltas, float *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void BoundingBoxDecode<half>(const size_t size, const half *rois, const half *deltas, half *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, hipStream_t cuda_stream);
795966ab355af722a7efb46c81e959e9c3dabba6.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/boundingbox_decode_impl.cuh" #include "include/cuda_fp16.h" template <typename T> __global__ void BoundingBoxDecodeKernel(const size_t size, const T *rois, const T *deltas, T *bboxes, const float m1, const float m2, const float m3, const float m4, const float s1, const float s2, const float s3, const float s4, const int max_height, const int max_width, const float ratio_clip) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t left_x = i * 4; const size_t left_y = i * 4 + 1; const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; T dx = deltas[left_x] * s1 + m1; T dy = deltas[left_y] * s2 + m2; T dw = deltas[right_x] * s3 + m3; T dh = deltas[right_y] * s4 + m4; T max_ratio = abs(log(ratio_clip)); dw = dw > max_ratio ? max_ratio : (dw < (-max_ratio) ? (-max_ratio) : dw); dh = dh > max_ratio ? max_ratio : (dh < (-max_ratio) ? (-max_ratio) : dh); T px = (rois[left_x] + rois[right_x]) * 0.5f; T py = (rois[left_y] + rois[right_y]) * 0.5f; T pw = rois[right_x] - rois[left_x] + 1.0f; T ph = rois[right_y] - rois[left_y] + 1.0f; T gx = px + pw * dx; T gy = py + ph * dy; T gw = pw * exp(dw); T gh = ph * exp(dh); T x1 = gx - gw * 0.5f + 0.5f; T y1 = gy - gh * 0.5f + 0.5f; T x2 = gx + gw * 0.5f - 0.5f; T y2 = gy + gh * 0.5f - 0.5f; x1 = x1 > max_width ? 
max_width : (x1 < 0 ? 0 : x1); y1 = y1 > max_height ? max_height : (y1 < 0 ? 0 : y1); x2 = x2 > max_width ? max_width : (x2 < 0 ? 0 : x2); y2 = y2 > max_height ? max_height : (y2 < 0 ? 0 : y2); bboxes[left_x] = x1; bboxes[left_y] = y1; bboxes[right_x] = x2; bboxes[right_y] = y2; } } template <> __global__ void BoundingBoxDecodeKernel(const size_t size, const half *rois, const half *deltas, half *bboxes, const float m1, const float m2, const float m3, const float m4, const float s1, const float s2, const float s3, const float s4, const int max_height, const int max_width, const float ratio_clip) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t left_x = i * 4; const size_t left_y = i * 4 + 1; const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; float dx = static_cast<float>(deltas[left_x]) * s1 + m1; float dy = static_cast<float>(deltas[left_y]) * s2 + m2; float dw = static_cast<float>(deltas[right_x]) * s3 + m3; float dh = static_cast<float>(deltas[right_y]) * s4 + m4; float max_ratio = abs(log(ratio_clip)); dw = dw > max_ratio ? max_ratio : (dw < (-max_ratio) ? (-max_ratio) : dw); dh = dh > max_ratio ? max_ratio : (dh < (-max_ratio) ? (-max_ratio) : dh); float px = static_cast<float>(rois[left_x] + rois[right_x]) * 0.5f; float py = static_cast<float>(rois[left_y] + rois[right_y]) * 0.5f; float pw = static_cast<float>(rois[right_x] - rois[left_x]) + 1.0f; float ph = static_cast<float>(rois[right_y] - rois[left_y]) + 1.0f; float gx = px + pw * dx; float gy = py + ph * dy; float gw = pw * exp(dw); float gh = ph * exp(dh); float x1 = gx - gw * 0.5f + 0.5f; float y1 = gy - gh * 0.5f + 0.5f; float x2 = gx + gw * 0.5f - 0.5f; float y2 = gy + gh * 0.5f - 0.5f; x1 = x1 > max_width ? max_width : (x1 < 0 ? 0 : x1); y1 = y1 > max_height ? max_height : (y1 < 0 ? 0 : y1); x2 = x2 > max_width ? max_width : (x2 < 0 ? 0 : x2); y2 = y2 > max_height ? max_height : (y2 < 0 ? 
0 : y2); bboxes[left_x] = x1; bboxes[left_y] = y1; bboxes[right_x] = x2; bboxes[right_y] = y2; } } template <typename T> void BoundingBoxDecode(const size_t size, const T *rois, const T *deltas, T *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, cudaStream_t cuda_stream) { BoundingBoxDecodeKernel<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size, rois, deltas, bboxes, m1, m2, m3, m4, s1, s2, s3, s4, max_height, max_width, ratio_clip); } template <> void BoundingBoxDecode(const size_t size, const half *rois, const half *deltas, half *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, cudaStream_t cuda_stream) { BoundingBoxDecodeKernel<half><<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size, rois, deltas, bboxes, m1, m2, m3, m4, s1, s2, s3, s4, max_height, max_width, ratio_clip); } template CUDA_LIB_EXPORT void BoundingBoxDecode<float>(const size_t size, const float *rois, const float *deltas, float *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void BoundingBoxDecode<half>(const size_t size, const half *rois, const half *deltas, half *bboxes, const float &m1, const float &m2, const float &m3, const float &m4, const float &s1, const float &s2, const float &s3, const float &s4, const int &max_height, const int &max_width, const float &ratio_clip, const uint32_t &device_id, cudaStream_t 
cuda_stream);
9fcfe99810ede47f15161a244dce38a1d551a62f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // standard #include <iostream> #include <sstream> #include <vector> #include <cstddef> #include <chrono> // internal #include "core.hpp" #include "console.hpp" #include "algorithm/cuda.hpp" namespace quick_hull { // CUDA error handling static void cuda_handle_error(hipError_t code, const char * file, int line) { if (code == hipSuccess) return; program::panic_begin << "Error(" << code << ") in" << file << " at line " << line << program::panic_end; } #define macro_cuda_call(call) (cuda_handle_error(call, __FILE__, __LINE__)) // CUDA version of vector2 functions __device__ Vector2 cuda_vector_make(double x, double y) { Vector2 vector; vector.x = x; vector.y = y; return vector; } __device__ Vector2 cuda_vector_subtract(const Vector2& a, const Vector2 &b) { return cuda_vector_make(a.x - b.x, a.y - b.y); } __device__ Vector2 cuda_vector_add(const Vector2& a, const Vector2& b) { return cuda_vector_make(a.x + b.x, a.y + b.y); } __device__ Vector2 cuda_vector_get_normal(const Vector2& vector) { return cuda_vector_make(-vector.y, vector.x); } __device__ double cuda_vector_get_sqr_magnitude(const Vector2& vector) { return (vector.x * vector.x) + (vector.y * vector.y); } __device__ double cuda_vector_dot_product(const Vector2 &vector_a, const Vector2 &vector_b) { return (vector_a.x * vector_b.x) + (vector_a.y * vector_b.y); } __device__ Vector2 cuda_vector_project(const Vector2 &vector_a, const Vector2 &vector_b) { double vector_b_sqr_magnitude = cuda_vector_get_sqr_magnitude(vector_b); if (vector_b_sqr_magnitude == 0) { return cuda_vector_make(0, 0); } double relativeness = cuda_vector_dot_product(vector_a, vector_b); return cuda_vector_make( vector_b.x * (relativeness / vector_b_sqr_magnitude), vector_b.y * (relativeness / vector_b_sqr_magnitude) ); } __global__ void find_farest_point_from_line ( Cuda_Thread_Data* result_point_index, // only the first thread in a block has write access 
const Vector2* points, int point_count, int line_point_a_index, int line_point_b_index ) { // Declares the block memory __shared__ Cuda_Thread_Data block_data [ 1024 // ..the maximum amount of threads ]; // Defines dimensions int block_count = gridDim.x; int thread_count = blockDim.x; int unit_count = thread_count * block_count; // Divides point count by number of computation units and ceil it to get a per thread standard amount of points int points_per_thread = (point_count + unit_count + 1) / unit_count; // Defines thread local data int block_id = blockIdx.x; int thread_id = threadIdx.x; int unit_index = thread_count * block_id + thread_id; int points_start = (unit_index) * points_per_thread; int points_end = (unit_index + 1) * points_per_thread; // Amount of points may not be a power of two. // That is why, it is necessary to trim points_end of the last thread if (points_end > point_count) // ..no need to check a thread id, because only the last one could satisfy this condition { points_end = point_count; } // By default, for each thread, sets point A as the farest one block_data[thread_id].farest_point_index = line_point_a_index; block_data[thread_id].farest_point_sqr_distance = 0; // Defines AB line and its normal towards the considered side Vector2 line_point_a = points[line_point_a_index]; Vector2 line_point_b = points[line_point_b_index]; Vector2 line = cuda_vector_subtract(line_point_b, line_point_a); Vector2 line_normal = cuda_vector_get_normal(line); // 1. 
Finds the farest point from AB line for(int point_index = points_start; point_index < points_end; point_index++) { Vector2 point = points[point_index]; // A point, which is a base of a projection on the AB line from the current point Vector2 projection_base = cuda_vector_add( cuda_vector_project( cuda_vector_subtract(point, line_point_a), line ), line_point_a ); // The projection (as vector) from the base to the point Vector2 projection = cuda_vector_subtract(point, projection_base); // Relativity checks that point is on the considered side of the AB line double relativity = cuda_vector_dot_product(projection, line_normal); double projection_sqr_magnitude = cuda_vector_get_sqr_magnitude(projection); // Saves the farest point data if (relativity > 0 && block_data[thread_id].farest_point_sqr_distance < projection_sqr_magnitude) { block_data[thread_id].farest_point_index = point_index; block_data[thread_id].farest_point_sqr_distance = projection_sqr_magnitude; } } __syncthreads(); // 2. Reduces a result of all threads using the (binary / a power of two) reduction method. // The farest point will be stored in the first thread memory. for (int reduction_step = 2; (thread_id % reduction_step == 0) && (thread_count >= reduction_step); reduction_step <<= 1) { // Gets a position of a new subject thread. int subject_thread_id = thread_id + (reduction_step >> 1); if (subject_thread_id >= thread_count) break; // .. there is no subject thread left // Checks for a subject thread to have a farther point and overrides this thread data when it does. if (block_data[thread_id].farest_point_sqr_distance < block_data[subject_thread_id].farest_point_sqr_distance) { block_data[thread_id].farest_point_sqr_distance = block_data[subject_thread_id].farest_point_sqr_distance; block_data[thread_id].farest_point_index = block_data[subject_thread_id].farest_point_index; }; __syncthreads(); } // 3. Copies the result of the first thread into thread's block result. 
if (thread_id == 0) { auto* copy_to = &result_point_index[block_id]; auto* copy_from = &block_data[0]; copy_to->farest_point_index = copy_from->farest_point_index; copy_to->farest_point_sqr_distance = copy_from->farest_point_sqr_distance; } } // Implements [ grow ] function template<int T_Block_Count, int T_Thread_Count> void Algorithm_Cuda::grow ( int point_a_index, int point_b_index, const std::vector<Vector2> & points ) { // Captures kernel start time auto stopwatch_start = std::chrono::steady_clock::now(); // 1. Runs the kernel function, which finds far points along the AB line hipLaunchKernelGGL(( find_farest_point_from_line), dim3(T_Block_Count), dim3(T_Thread_Count), 0, 0, device_far_points, device_points_copy, points.size(), point_a_index, point_b_index ); // Waits the kernel function macro_cuda_call(hipDeviceSynchronize()); // Captures kernel end time auto stopwatch_end = std::chrono::steady_clock::now(); // Gathers analytic data this->total_recursion_call_count++; // ..recurison counting this->kernel_total_time += std::chrono::duration<double, std::milli> // ..kernel ellapsed milliseconds ( stopwatch_end - stopwatch_start ) .count(); // 2. Copies the result data from the device to the host size_t far_points_memsize = T_Block_Count * sizeof(Cuda_Thread_Data); stopwatch_start = std::chrono::steady_clock::now(); macro_cuda_call(hipMemcpy(host_far_points, device_far_points, far_points_memsize, hipMemcpyDeviceToHost)); stopwatch_end = std::chrono::steady_clock::now(); this->cuda_memcpy_total_time += std::chrono::duration<double, std::milli> ( stopwatch_end - stopwatch_start ) .count(); // 3. Reduces result of blocks into the first block memory. // The farest point among others will be stored. 
stopwatch_start = std::chrono::steady_clock::now(); for (int step = 1; step < T_Block_Count; step <<= 1) { for (int index = 0; index < T_Block_Count; index += (step << 1)) { auto* recepient = &host_far_points[index]; auto* donor = &host_far_points[index + step]; if (recepient->farest_point_sqr_distance < donor->farest_point_sqr_distance) { recepient->farest_point_index = donor->farest_point_index; recepient->farest_point_sqr_distance = donor->farest_point_sqr_distance; } } } stopwatch_end = std::chrono::steady_clock::now(); this->reduction_total_time += std::chrono::duration<double, std::milli> ( stopwatch_end - stopwatch_start ) .count(); // Gets the farest point from AB line int point_c_index = host_far_points[0].farest_point_index; // Checks it to be different from A and B point if (point_c_index != point_a_index && point_c_index != point_b_index) { // Tries to grow the convex hull from the AC line grow<T_Block_Count, T_Thread_Count>( point_a_index, point_c_index, points ); // Adds the founded farest point to the convex hull convex_hull->push_back(points[point_c_index]); // Tries to grow the convex hull from the CB line grow<T_Block_Count, T_Thread_Count>( point_c_index, point_b_index, points ); } } template<int T_Block_Count, int T_Thread_Count> std::vector<Vector2> * Algorithm_Cuda::internal_run ( const std::vector<Vector2> &points ) { int most_left_index = 0; int most_right_index = points.size() - 1; // 1. 
Finds the most left and right point for (int index = 0; index < points.size(); index++) { const auto point = points[index]; const auto most_right = points[most_right_index]; const auto most_left = points[most_left_index]; if (point.x > most_right.x || (point.x == most_right.x && point.y > most_right.y)) { most_right_index = index; } else if (point.x < most_left.x || (point.x == most_right.x && point.y < most_right.y)) { most_left_index = index; } } size_t far_points_memsize = (T_Block_Count) * sizeof(Cuda_Thread_Data); size_t points_memsize = points.size() * sizeof(Vector2); // 2. Allocates special CUDA memory auto stopwatch_start = std::chrono::steady_clock::now(); // ============================================================ // !!! This CALL is 60~80 % of the program execution time !!!!! // !!! Makes CUDA implementation massively inefficient !!!!!!!! macro_cuda_call(hipHostMalloc((void**) &host_far_points, far_points_memsize, hipHostMallocDefault)); macro_cuda_call(hipMalloc((void**) &device_far_points, far_points_memsize)); macro_cuda_call(hipMalloc((void**) &device_points_copy, points_memsize)); macro_cuda_call(hipMemcpy(device_points_copy, points.data(), points_memsize, hipMemcpyHostToDevice)); // ============================================================ auto stopwatch_end = std::chrono::steady_clock::now(); this->cuda_meminit_total_time = std::chrono::duration<double, std::milli> ( stopwatch_end - stopwatch_start ) .count(); // 3. Constructs a convex from right and left side of line going through the most left and right points. convex_hull = new std::vector<Vector2>(); convex_hull->push_back(points[most_left_index]); // 3.1 Grows the convex hull from ML->MR line grow<T_Block_Count, T_Thread_Count>( most_left_index, most_right_index, points ); convex_hull->push_back(points[most_right_index]); // 3.2 Grows the convex hull from MR->ML line grow<T_Block_Count, T_Thread_Count>( most_right_index, most_left_index, points ); // 4. 
Releases special CUDA memory macro_cuda_call(hipHostFree(host_far_points)); macro_cuda_call(hipFree(device_far_points)); macro_cuda_call(hipFree(device_points_copy)); return convex_hull; } Algorithm_Cuda::~Algorithm_Cuda() { } Algorithm_Cuda::Algorithm_Cuda(int block_power) { this->block_power = block_power; } std::vector<Vector2> * Algorithm_Cuda::run(const std::vector<Vector2> &points) { // Resets analytic data this->kernel_total_time = 0; this->total_recursion_call_count = 0; this->cuda_memcpy_total_time = 0; this->reduction_total_time = 0; // Depending on the power of the number of CUDA blocks, // matches with a call, with a correct number of threads per block switch(this->block_power) { // x1 case 0 : return internal_run<1,1024>(points); // x2 case 1 : return internal_run<2,512>(points); // x4 case 2 : return internal_run<4,256>(points); // x8 case 3 : return internal_run<8,128>(points); // x16 default: // by default, if block power has not been set then 16 blocks and 64 threads will be used case 4 : return internal_run<16,64>(points); // x32 case 5 : return internal_run<32,32>(points); // x64 case 6 : return internal_run<64,16>(points); // x128 case 7 : return internal_run<128,8>(points); // x256 case 8 : return internal_run<256,4>(points); // x512 case 9 : return internal_run<512,2>(points); // x1024 case 10: return internal_run<1024,1>(points); } } }
9fcfe99810ede47f15161a244dce38a1d551a62f.cu
// standard #include <iostream> #include <sstream> #include <vector> #include <cstddef> #include <chrono> // internal #include "core.hpp" #include "console.hpp" #include "algorithm/cuda.hpp" namespace quick_hull { // CUDA error handling static void cuda_handle_error(cudaError_t code, const char * file, int line) { if (code == cudaSuccess) return; program::panic_begin << "Error(" << code << ") in" << file << " at line " << line << program::panic_end; } #define macro_cuda_call(call) (cuda_handle_error(call, __FILE__, __LINE__)) // CUDA version of vector2 functions __device__ Vector2 cuda_vector_make(double x, double y) { Vector2 vector; vector.x = x; vector.y = y; return vector; } __device__ Vector2 cuda_vector_subtract(const Vector2& a, const Vector2 &b) { return cuda_vector_make(a.x - b.x, a.y - b.y); } __device__ Vector2 cuda_vector_add(const Vector2& a, const Vector2& b) { return cuda_vector_make(a.x + b.x, a.y + b.y); } __device__ Vector2 cuda_vector_get_normal(const Vector2& vector) { return cuda_vector_make(-vector.y, vector.x); } __device__ double cuda_vector_get_sqr_magnitude(const Vector2& vector) { return (vector.x * vector.x) + (vector.y * vector.y); } __device__ double cuda_vector_dot_product(const Vector2 &vector_a, const Vector2 &vector_b) { return (vector_a.x * vector_b.x) + (vector_a.y * vector_b.y); } __device__ Vector2 cuda_vector_project(const Vector2 &vector_a, const Vector2 &vector_b) { double vector_b_sqr_magnitude = cuda_vector_get_sqr_magnitude(vector_b); if (vector_b_sqr_magnitude == 0) { return cuda_vector_make(0, 0); } double relativeness = cuda_vector_dot_product(vector_a, vector_b); return cuda_vector_make( vector_b.x * (relativeness / vector_b_sqr_magnitude), vector_b.y * (relativeness / vector_b_sqr_magnitude) ); } __global__ void find_farest_point_from_line ( Cuda_Thread_Data* result_point_index, // only the first thread in a block has write access const Vector2* points, int point_count, int line_point_a_index, int line_point_b_index 
) { // Declares the block memory __shared__ Cuda_Thread_Data block_data [ 1024 // ..the maximum amount of threads ]; // Defines dimensions int block_count = gridDim.x; int thread_count = blockDim.x; int unit_count = thread_count * block_count; // Divides point count by number of computation units and ceil it to get a per thread standard amount of points int points_per_thread = (point_count + unit_count + 1) / unit_count; // Defines thread local data int block_id = blockIdx.x; int thread_id = threadIdx.x; int unit_index = thread_count * block_id + thread_id; int points_start = (unit_index) * points_per_thread; int points_end = (unit_index + 1) * points_per_thread; // Amount of points may not be a power of two. // That is why, it is necessary to trim points_end of the last thread if (points_end > point_count) // ..no need to check a thread id, because only the last one could satisfy this condition { points_end = point_count; } // By default, for each thread, sets point A as the farest one block_data[thread_id].farest_point_index = line_point_a_index; block_data[thread_id].farest_point_sqr_distance = 0; // Defines AB line and its normal towards the considered side Vector2 line_point_a = points[line_point_a_index]; Vector2 line_point_b = points[line_point_b_index]; Vector2 line = cuda_vector_subtract(line_point_b, line_point_a); Vector2 line_normal = cuda_vector_get_normal(line); // 1. 
Finds the farest point from AB line for(int point_index = points_start; point_index < points_end; point_index++) { Vector2 point = points[point_index]; // A point, which is a base of a projection on the AB line from the current point Vector2 projection_base = cuda_vector_add( cuda_vector_project( cuda_vector_subtract(point, line_point_a), line ), line_point_a ); // The projection (as vector) from the base to the point Vector2 projection = cuda_vector_subtract(point, projection_base); // Relativity checks that point is on the considered side of the AB line double relativity = cuda_vector_dot_product(projection, line_normal); double projection_sqr_magnitude = cuda_vector_get_sqr_magnitude(projection); // Saves the farest point data if (relativity > 0 && block_data[thread_id].farest_point_sqr_distance < projection_sqr_magnitude) { block_data[thread_id].farest_point_index = point_index; block_data[thread_id].farest_point_sqr_distance = projection_sqr_magnitude; } } __syncthreads(); // 2. Reduces a result of all threads using the (binary / a power of two) reduction method. // The farest point will be stored in the first thread memory. for (int reduction_step = 2; (thread_id % reduction_step == 0) && (thread_count >= reduction_step); reduction_step <<= 1) { // Gets a position of a new subject thread. int subject_thread_id = thread_id + (reduction_step >> 1); if (subject_thread_id >= thread_count) break; // .. there is no subject thread left // Checks for a subject thread to have a farther point and overrides this thread data when it does. if (block_data[thread_id].farest_point_sqr_distance < block_data[subject_thread_id].farest_point_sqr_distance) { block_data[thread_id].farest_point_sqr_distance = block_data[subject_thread_id].farest_point_sqr_distance; block_data[thread_id].farest_point_index = block_data[subject_thread_id].farest_point_index; }; __syncthreads(); } // 3. Copies the result of the first thread into thread's block result. 
if (thread_id == 0) { auto* copy_to = &result_point_index[block_id]; auto* copy_from = &block_data[0]; copy_to->farest_point_index = copy_from->farest_point_index; copy_to->farest_point_sqr_distance = copy_from->farest_point_sqr_distance; } } // Implements [ grow ] function template<int T_Block_Count, int T_Thread_Count> void Algorithm_Cuda::grow ( int point_a_index, int point_b_index, const std::vector<Vector2> & points ) { // Captures kernel start time auto stopwatch_start = std::chrono::steady_clock::now(); // 1. Runs the kernel function, which finds far points along the AB line find_farest_point_from_line<<<T_Block_Count, T_Thread_Count>>> ( device_far_points, device_points_copy, points.size(), point_a_index, point_b_index ); // Waits the kernel function macro_cuda_call(cudaDeviceSynchronize()); // Captures kernel end time auto stopwatch_end = std::chrono::steady_clock::now(); // Gathers analytic data this->total_recursion_call_count++; // ..recurison counting this->kernel_total_time += std::chrono::duration<double, std::milli> // ..kernel ellapsed milliseconds ( stopwatch_end - stopwatch_start ) .count(); // 2. Copies the result data from the device to the host size_t far_points_memsize = T_Block_Count * sizeof(Cuda_Thread_Data); stopwatch_start = std::chrono::steady_clock::now(); macro_cuda_call(cudaMemcpy(host_far_points, device_far_points, far_points_memsize, cudaMemcpyDeviceToHost)); stopwatch_end = std::chrono::steady_clock::now(); this->cuda_memcpy_total_time += std::chrono::duration<double, std::milli> ( stopwatch_end - stopwatch_start ) .count(); // 3. Reduces result of blocks into the first block memory. // The farest point among others will be stored. 
stopwatch_start = std::chrono::steady_clock::now(); for (int step = 1; step < T_Block_Count; step <<= 1) { for (int index = 0; index < T_Block_Count; index += (step << 1)) { auto* recepient = &host_far_points[index]; auto* donor = &host_far_points[index + step]; if (recepient->farest_point_sqr_distance < donor->farest_point_sqr_distance) { recepient->farest_point_index = donor->farest_point_index; recepient->farest_point_sqr_distance = donor->farest_point_sqr_distance; } } } stopwatch_end = std::chrono::steady_clock::now(); this->reduction_total_time += std::chrono::duration<double, std::milli> ( stopwatch_end - stopwatch_start ) .count(); // Gets the farest point from AB line int point_c_index = host_far_points[0].farest_point_index; // Checks it to be different from A and B point if (point_c_index != point_a_index && point_c_index != point_b_index) { // Tries to grow the convex hull from the AC line grow<T_Block_Count, T_Thread_Count>( point_a_index, point_c_index, points ); // Adds the founded farest point to the convex hull convex_hull->push_back(points[point_c_index]); // Tries to grow the convex hull from the CB line grow<T_Block_Count, T_Thread_Count>( point_c_index, point_b_index, points ); } } template<int T_Block_Count, int T_Thread_Count> std::vector<Vector2> * Algorithm_Cuda::internal_run ( const std::vector<Vector2> &points ) { int most_left_index = 0; int most_right_index = points.size() - 1; // 1. 
Finds the most left and right point for (int index = 0; index < points.size(); index++) { const auto point = points[index]; const auto most_right = points[most_right_index]; const auto most_left = points[most_left_index]; if (point.x > most_right.x || (point.x == most_right.x && point.y > most_right.y)) { most_right_index = index; } else if (point.x < most_left.x || (point.x == most_right.x && point.y < most_right.y)) { most_left_index = index; } } size_t far_points_memsize = (T_Block_Count) * sizeof(Cuda_Thread_Data); size_t points_memsize = points.size() * sizeof(Vector2); // 2. Allocates special CUDA memory auto stopwatch_start = std::chrono::steady_clock::now(); // ============================================================ // !!! This CALL is 60~80 % of the program execution time !!!!! // !!! Makes CUDA implementation massively inefficient !!!!!!!! macro_cuda_call(cudaHostAlloc((void**) &host_far_points, far_points_memsize, cudaHostAllocDefault)); macro_cuda_call(cudaMalloc((void**) &device_far_points, far_points_memsize)); macro_cuda_call(cudaMalloc((void**) &device_points_copy, points_memsize)); macro_cuda_call(cudaMemcpy(device_points_copy, points.data(), points_memsize, cudaMemcpyHostToDevice)); // ============================================================ auto stopwatch_end = std::chrono::steady_clock::now(); this->cuda_meminit_total_time = std::chrono::duration<double, std::milli> ( stopwatch_end - stopwatch_start ) .count(); // 3. Constructs a convex from right and left side of line going through the most left and right points. convex_hull = new std::vector<Vector2>(); convex_hull->push_back(points[most_left_index]); // 3.1 Grows the convex hull from ML->MR line grow<T_Block_Count, T_Thread_Count>( most_left_index, most_right_index, points ); convex_hull->push_back(points[most_right_index]); // 3.2 Grows the convex hull from MR->ML line grow<T_Block_Count, T_Thread_Count>( most_right_index, most_left_index, points ); // 4. 
Releases special CUDA memory macro_cuda_call(cudaFreeHost(host_far_points)); macro_cuda_call(cudaFree(device_far_points)); macro_cuda_call(cudaFree(device_points_copy)); return convex_hull; } Algorithm_Cuda::~Algorithm_Cuda() { } Algorithm_Cuda::Algorithm_Cuda(int block_power) { this->block_power = block_power; } std::vector<Vector2> * Algorithm_Cuda::run(const std::vector<Vector2> &points) { // Resets analytic data this->kernel_total_time = 0; this->total_recursion_call_count = 0; this->cuda_memcpy_total_time = 0; this->reduction_total_time = 0; // Depending on the power of the number of CUDA blocks, // matches with a call, with a correct number of threads per block switch(this->block_power) { // x1 case 0 : return internal_run<1,1024>(points); // x2 case 1 : return internal_run<2,512>(points); // x4 case 2 : return internal_run<4,256>(points); // x8 case 3 : return internal_run<8,128>(points); // x16 default: // by default, if block power has not been set then 16 blocks and 64 threads will be used case 4 : return internal_run<16,64>(points); // x32 case 5 : return internal_run<32,32>(points); // x64 case 6 : return internal_run<64,16>(points); // x128 case 7 : return internal_run<128,8>(points); // x256 case 8 : return internal_run<256,4>(points); // x512 case 9 : return internal_run<512,2>(points); // x1024 case 10: return internal_run<1024,1>(points); } } }
33af34188330541b6bff130de626cce6491abb9f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <math.h> __global__ void matmul_tile(float *a, float *b, float *c, int n, int m, int p, int TW, int NTB) { extern __shared__ float bigarray[]; float *aTile=&bigarray[0], *bTile=&bigarray[TW*TW]; int tx = threadIdx.x; int ty = threadIdx.y; float *cvalue;//scope: thread int col = tx + blockDim.x * blockIdx.x; int row = ty + blockDim.y * blockIdx.y; int tileNum, aIdx, bIdx, tileIdx_m, tileCol; tileNum = p/TW + (p % TW != 0); cvalue = (float *)malloc(NTB*sizeof(float)); //init c tiles for (tileIdx_m=0; tileIdx_m<NTB; tileIdx_m++) cvalue[tileIdx_m] = 0.; for (int tileIdx_p=0; tileIdx_p<tileNum; tileIdx_p++) { //load aTile aIdx = tileIdx_p*TW + tx; if(aIdx >= p || row >= n){ aTile[ty*TW+tx] = 0.; }else{ aTile[ty*TW+tx] = a[row*p + aIdx]; //Copy to shared memory } for(tileIdx_m=0; tileIdx_m<NTB; tileIdx_m++){ //load btile[ty][tx] with element [ty][tx] in tileIdx_m-th tile of b if((blockIdx.x % NTB) == 0){ bIdx = tileIdx_p*TW +ty; tileCol = tx + blockDim.x*(blockIdx.x+tileIdx_m); if(bIdx >= p || tileCol >= m){ bTile[ty*TW+tx] = 0.; }else{ bTile[ty*TW+tx] = b[bIdx*m + tileCol]; //Copy to shared memory } __syncthreads(); for (int k=0; k<TW; k++){ cvalue[tileIdx_m] += aTile[ty*TW+k] * bTile[k*TW+tx]; //printf("bx = %d, by = %d, (tx = %d, ty = %d) @ tileIdx_m = %d : a=%.2f b=%.2f \n",blockIdx.x, blockIdx.y, tx, ty, tileIdx_m, aTile[ty*TW+k],bTile[k*TW+tx]); } //printf("bx = %d, by = %d, (tx = %d, ty = %d) @ tileIdx_m = %d: c= %.2f\n",blockIdx.x, blockIdx.y, tx, ty, tileIdx_m, cvalue[tileIdx_m]); __syncthreads(); c[row*m + tileCol] = cvalue[tileIdx_m]; } } } if(row < n && col < m){ for(tileIdx_m=0; tileIdx_m<NTB; tileIdx_m++){ //load to C if((blockIdx.x % NTB) == 0){ tileCol = tx + blockDim.x*(blockIdx.x+tileIdx_m); c[row*m + tileCol] = cvalue[tileIdx_m]; } } } free(cvalue); } void cpu_matrixmult(float *a,float *b, float *c, int n, int m, int 
p) { int index, indexa, indexb; float cvalue; for(int col=0;col < m; col++){ for(int row=0;row < n; row++) { indexb = col; index = row * m + col; cvalue = 0.; for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m){ cvalue += a[indexa]*b[indexb]; } c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations. } } } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int Grid_Dim_x = 1; //Grid dimension, x int Grid_Dim_y = 1; //Grid dimension, y int Block_Dim_x = 1; //Block dimension, x int Block_Dim_y = 1; //Block dimension, y int TW = 1; int NTB = 1; int n,m,p; // matrix dimension float *a,*b,*c; float *dev_a, *dev_b, *dev_c; int size_a, size_b, size_c; // number of bytes in arrays hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also hipError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = hipGetDeviceCount(&gpucount); if (errorcode == hipErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } //else printf("Device count = %d\n",gpucount); if (argc<10) { printf("# of inputs: %d\n", argc); printf("Usage: Task1GPUsp <n> <m> <p> <block dim x> <block dim y> <grid dim x> <grid dim y> <tile width> <Number of tiles>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); Block_Dim_x = atoi(argv[4]); // non-Square block, # of rows Block_Dim_y = atoi(argv[5]); // non-Square block, # of cols if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } //not really used in Task2 Grid_Dim_x = atoi(argv[6]); // non-Square grid, # of rows Grid_Dim_y = atoi(argv[7]); // non-Square grid, # of cols TW = atoi(argv[8]); if(Block_Dim_x != Block_Dim_y || Block_Dim_x != TW || Block_Dim_y != TW){ printf("Error, bx, by, tw must be equal\n"); exit(-1); } //printf("A Matrix Dimension = %dx%d\n",n,p); //printf("B 
Matrix Dimension = %dx%d\n",p,m); //printf("C Matrix Dimension = %dx%d\n",n,m); Grid_Dim_x = m/Block_Dim_x + (m % Block_Dim_x != 0); Grid_Dim_y = n/Block_Dim_y + (n % Block_Dim_y != 0); NTB = atoi(argv[9]); //printf("Grid_x = %d Grid_y = %d NTB = %d\n", Grid_Dim_x,Grid_Dim_y,NTB); dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure size_a = n * p * sizeof(float); // number of bytes in total in arrays size_b = p * m * sizeof(float); // number of bytes in total in arrays size_c = n * m * sizeof(float); // number of bytes in total in arrays a = (float*) malloc(size_a); // dynamically allocated memory for arrays on host b = (float*) malloc(size_b); c = (float*) malloc(size_c); // results from GPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (float) rand() / (float) RAND_MAX; //a[i * p + j] = (float) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } //printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (float) rand() / (float) RAND_MAX; //b[i * m + j] = (float) (i+j); //printf("%.2f ", b[i * m + j]); } //printf("\n"); } // ------------- COMPUTATION DONE ON GPU ---------------------------- errorcode = hipMalloc((void**)&dev_a, size_a); // allocate memory on device if(errorcode != hipSuccess) { // print the CUDA error message and exit printf("hipMalloc error: %s\n", hipGetErrorString(errorcode)); exit(-1); } errorcode = hipMalloc((void**)&dev_b, size_b); if(errorcode != hipSuccess) { // print the CUDA error message and exit printf("hipMalloc error: %s\n", hipGetErrorString(errorcode)); exit(-1); } errorcode = hipMalloc((void**)&dev_c, size_c); if(errorcode != hipSuccess) { // print the CUDA error message and exit printf("hipMalloc error: %s\n", hipGetErrorString(errorcode)); exit(-1); } hipMemcpy(dev_a, a , size_a ,hipMemcpyHostToDevice); hipMemcpy(dev_b, b , 
size_b ,hipMemcpyHostToDevice); hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); // hipEventSynchronize(start); // not needed size_t Ns = 2 * TW*TW * sizeof(float); size_t heapSize = Grid_Dim_x * Grid_Dim_y * Block_Dim_x* Block_Dim_y * NTB * sizeof(float)/4; errorcode = hipDeviceSetLimit(hipLimitMallocHeapSize, heapSize); if(errorcode != hipSuccess) { // print the CUDA error message and exit printf("cuda device heap error: %s\n", hipGetErrorString(errorcode)); exit(-1); } hipLaunchKernelGGL(( matmul_tile), dim3(Grid),dim3(Block), Ns, 0, dev_a, dev_b, dev_c, n, m, p, TW, NTB); // make the host block until the device is finished with foo hipDeviceSynchronize(); // check for error errorcode = hipGetLastError(); if(errorcode != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(errorcode)); exit(-1); } hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); hipMemcpy(c,dev_c, size_c ,hipMemcpyDeviceToHost); //printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) /* hipEventRecord(start, 0); // use same timing cpu_matrixmult(a,b,c, n, m, p); // do calculation on host (NOTE: This computes the diff with GPU result.) hipEventRecord(stop, 0); // instrument code to measue end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. 
time // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0;i < n*n;i++) { ai = (double) a[i]; bi = (double) b[i]; ci = (double) c[i]; suma += ai*ai; sumb += bi*bi; sumc += ci*ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(n*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); */ // -------------- clean up --------------------------------------- free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
33af34188330541b6bff130de626cce6491abb9f.cu
#include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <math.h> __global__ void matmul_tile(float *a, float *b, float *c, int n, int m, int p, int TW, int NTB) { extern __shared__ float bigarray[]; float *aTile=&bigarray[0], *bTile=&bigarray[TW*TW]; int tx = threadIdx.x; int ty = threadIdx.y; float *cvalue;//scope: thread int col = tx + blockDim.x * blockIdx.x; int row = ty + blockDim.y * blockIdx.y; int tileNum, aIdx, bIdx, tileIdx_m, tileCol; tileNum = p/TW + (p % TW != 0); cvalue = (float *)malloc(NTB*sizeof(float)); //init c tiles for (tileIdx_m=0; tileIdx_m<NTB; tileIdx_m++) cvalue[tileIdx_m] = 0.; for (int tileIdx_p=0; tileIdx_p<tileNum; tileIdx_p++) { //load aTile aIdx = tileIdx_p*TW + tx; if(aIdx >= p || row >= n){ aTile[ty*TW+tx] = 0.; }else{ aTile[ty*TW+tx] = a[row*p + aIdx]; //Copy to shared memory } for(tileIdx_m=0; tileIdx_m<NTB; tileIdx_m++){ //load btile[ty][tx] with element [ty][tx] in tileIdx_m-th tile of b if((blockIdx.x % NTB) == 0){ bIdx = tileIdx_p*TW +ty; tileCol = tx + blockDim.x*(blockIdx.x+tileIdx_m); if(bIdx >= p || tileCol >= m){ bTile[ty*TW+tx] = 0.; }else{ bTile[ty*TW+tx] = b[bIdx*m + tileCol]; //Copy to shared memory } __syncthreads(); for (int k=0; k<TW; k++){ cvalue[tileIdx_m] += aTile[ty*TW+k] * bTile[k*TW+tx]; //printf("bx = %d, by = %d, (tx = %d, ty = %d) @ tileIdx_m = %d : a=%.2f b=%.2f \n",blockIdx.x, blockIdx.y, tx, ty, tileIdx_m, aTile[ty*TW+k],bTile[k*TW+tx]); } //printf("bx = %d, by = %d, (tx = %d, ty = %d) @ tileIdx_m = %d: c= %.2f\n",blockIdx.x, blockIdx.y, tx, ty, tileIdx_m, cvalue[tileIdx_m]); __syncthreads(); c[row*m + tileCol] = cvalue[tileIdx_m]; } } } if(row < n && col < m){ for(tileIdx_m=0; tileIdx_m<NTB; tileIdx_m++){ //load to C if((blockIdx.x % NTB) == 0){ tileCol = tx + blockDim.x*(blockIdx.x+tileIdx_m); c[row*m + tileCol] = cvalue[tileIdx_m]; } } } free(cvalue); } void cpu_matrixmult(float *a,float *b, float *c, int n, int m, int p) { int index, indexa, indexb; float cvalue; for(int col=0;col < m; 
col++){ for(int row=0;row < n; row++) { indexb = col; index = row * m + col; cvalue = 0.; for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m){ cvalue += a[indexa]*b[indexb]; } c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations. } } } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int Grid_Dim_x = 1; //Grid dimension, x int Grid_Dim_y = 1; //Grid dimension, y int Block_Dim_x = 1; //Block dimension, x int Block_Dim_y = 1; //Block dimension, y int TW = 1; int NTB = 1; int n,m,p; // matrix dimension float *a,*b,*c; float *dev_a, *dev_b, *dev_c; int size_a, size_b, size_c; // number of bytes in arrays cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = cudaGetDeviceCount(&gpucount); if (errorcode == cudaErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } //else printf("Device count = %d\n",gpucount); if (argc<10) { printf("# of inputs: %d\n", argc); printf("Usage: Task1GPUsp <n> <m> <p> <block dim x> <block dim y> <grid dim x> <grid dim y> <tile width> <Number of tiles>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); Block_Dim_x = atoi(argv[4]); // non-Square block, # of rows Block_Dim_y = atoi(argv[5]); // non-Square block, # of cols if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } //not really used in Task2 Grid_Dim_x = atoi(argv[6]); // non-Square grid, # of rows Grid_Dim_y = atoi(argv[7]); // non-Square grid, # of cols TW = atoi(argv[8]); if(Block_Dim_x != Block_Dim_y || Block_Dim_x != TW || Block_Dim_y != TW){ printf("Error, bx, by, tw must be equal\n"); exit(-1); } //printf("A Matrix Dimension = %dx%d\n",n,p); //printf("B Matrix Dimension = %dx%d\n",p,m); //printf("C Matrix Dimension = 
%dx%d\n",n,m); Grid_Dim_x = m/Block_Dim_x + (m % Block_Dim_x != 0); Grid_Dim_y = n/Block_Dim_y + (n % Block_Dim_y != 0); NTB = atoi(argv[9]); //printf("Grid_x = %d Grid_y = %d NTB = %d\n", Grid_Dim_x,Grid_Dim_y,NTB); dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure size_a = n * p * sizeof(float); // number of bytes in total in arrays size_b = p * m * sizeof(float); // number of bytes in total in arrays size_c = n * m * sizeof(float); // number of bytes in total in arrays a = (float*) malloc(size_a); // dynamically allocated memory for arrays on host b = (float*) malloc(size_b); c = (float*) malloc(size_c); // results from GPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (float) rand() / (float) RAND_MAX; //a[i * p + j] = (float) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } //printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (float) rand() / (float) RAND_MAX; //b[i * m + j] = (float) (i+j); //printf("%.2f ", b[i * m + j]); } //printf("\n"); } // ------------- COMPUTATION DONE ON GPU ---------------------------- errorcode = cudaMalloc((void**)&dev_a, size_a); // allocate memory on device if(errorcode != cudaSuccess) { // print the CUDA error message and exit printf("cudaMalloc error: %s\n", cudaGetErrorString(errorcode)); exit(-1); } errorcode = cudaMalloc((void**)&dev_b, size_b); if(errorcode != cudaSuccess) { // print the CUDA error message and exit printf("cudaMalloc error: %s\n", cudaGetErrorString(errorcode)); exit(-1); } errorcode = cudaMalloc((void**)&dev_c, size_c); if(errorcode != cudaSuccess) { // print the CUDA error message and exit printf("cudaMalloc error: %s\n", cudaGetErrorString(errorcode)); exit(-1); } cudaMemcpy(dev_a, a , size_a ,cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b , size_b ,cudaMemcpyHostToDevice); 
cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); // cudaEventSynchronize(start); // not needed size_t Ns = 2 * TW*TW * sizeof(float); size_t heapSize = Grid_Dim_x * Grid_Dim_y * Block_Dim_x* Block_Dim_y * NTB * sizeof(float)/4; errorcode = cudaDeviceSetLimit(cudaLimitMallocHeapSize, heapSize); if(errorcode != cudaSuccess) { // print the CUDA error message and exit printf("cuda device heap error: %s\n", cudaGetErrorString(errorcode)); exit(-1); } matmul_tile<<<Grid,Block, Ns>>>(dev_a, dev_b, dev_c, n, m, p, TW, NTB); // make the host block until the device is finished with foo cudaThreadSynchronize(); // check for error errorcode = cudaGetLastError(); if(errorcode != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(errorcode)); exit(-1); } cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); cudaMemcpy(c,dev_c, size_c ,cudaMemcpyDeviceToHost); //printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) /* cudaEventRecord(start, 0); // use same timing cpu_matrixmult(a,b,c, n, m, p); // do calculation on host (NOTE: This computes the diff with GPU result.) cudaEventRecord(stop, 0); // instrument code to measue end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. 
time // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0;i < n*n;i++) { ai = (double) a[i]; bi = (double) b[i]; ci = (double) c[i]; suma += ai*ai; sumb += bi*bi; sumc += ci*ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(n*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); */ // -------------- clean up --------------------------------------- free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
082dc0b61b5d79fcc5b9ec9058072af9969adfd4.hip
// !!! This is a file automatically generated by hipify!!! // Matrix Multiplication in CUDA #include <stdio.h> //#include <string.h> //#include <assert.h> //#include <stdlib.h> #include <hip/hip_runtime.h> // includes, project //////////////////////////////////////////////////////////////////////////////// // declarations, forward #define WIDTH 32 #define THREAD_BLOCK_WIDTH 4 #define THREAD_WIDTH 8 extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); // FILL HERE: define constant variable // MatrixMul kernel /** * CUDA Kernel Device code * * Computes the matrix multiplication of A and B into C. The 3 matrices have the same * number of elements WIDTH*WIDTH. */ // FILL HERE: translate C-version matrixMul to CUDA-version kernel code __global__ void MatrixMul(float* A, float* B, float* C, unsigned long long* runtime) { // TODO : Kernel Function // C = A * B // --> unsigned long long start_time = clock64(); int thread_row = threadIdx.y; int thread_col = threadIdx.x; int blk_col_offset = blockIdx.x; int blk_row_offset = blockIdx.y; float lC= 0.0; __shared__ float sA[THREAD_WIDTH][THREAD_WIDTH]; __shared__ float sB[THREAD_WIDTH][THREAD_WIDTH]; for(int k = 0; k < THREAD_BLOCK_WIDTH; k++) { // load up the whole A and B subblock for thise particulat setion sA[thread_row][thread_col]=A[((blk_row_offset*THREAD_WIDTH)+thread_row)*WIDTH + (k*THREAD_WIDTH)+thread_col]; sB[thread_row][thread_col]=B[((k*THREAD_WIDTH)+thread_row)*WIDTH + (blk_col_offset*THREAD_WIDTH)+thread_col]; // make sure all data as been copied to subblocks before continuing calculation __syncthreads(); for(int kk = 0; kk < THREAD_WIDTH;kk++) { lC += sA[thread_row][kk] * sB[kk][thread_col]; } C[((blk_row_offset*THREAD_WIDTH)+thread_row)*WIDTH + (blk_col_offset*THREAD_WIDTH) + thread_col] += lC; lC = 0.0; } unsigned long long stop_time = clock64(); runtime[(THREAD_WIDTH*blk_row_offset)+thread_col + ((THREAD_WIDTH*blk_row_offset)+thread_row)*WIDTH] = (unsigned 
long long)(stop_time - start_time); } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the matrix size to be used, and compute its size int size = WIDTH*WIDTH*sizeof(float); printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH); // Allocate the host input matrix h_A float *h_A = (float *)malloc(size); // Allocate the host input matrix h_B float *h_B = (float *)malloc(size); // Allocate the host input matrix h_C float *h_C = (float *)malloc(size); // Allocate the host matrix for compute check float *reference = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL) { fprintf(stderr, "Failed to allocate host matrices!\n"); exit(EXIT_FAILURE); } // Initialize the host input matrices for (int i = 0; i < WIDTH; ++i) { for (int j = 0; j < WIDTH; ++j) { h_A[i*WIDTH + j] = 0.01f * (float)(rand()%50); h_B[i*WIDTH + j] = 1.0f * (float)(rand()%50); } } memset(h_C, 0, size); memset(reference, 0, size); // compute the matrix multiplication on the CPU for comparison computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH); // Allocate device input matrices // TODO : Leave/Remove the given hipMalloc code properly // --> float* d_A = NULL; err = hipMalloc((void**)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float* d_B = NULL; err = hipMalloc((void**)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // <-- // Allocate the device output matrix float* d_C = NULL; err = hipMalloc((void**)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input matrix A and B in host memory to the device 
input matrices in // device memory // TODO : Add proper mem copy APIs according to the memory that matrix A and B will be stored // --> printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);// FILL HERE if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);// FILL HERE if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // <-- // copy zeroed out array into device/global memory err = hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice);// FILL HERE if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // TODO : Clock Measurements // Add code to return clock cycles from kernel // --> #ifdef TM unsigned long long* d_runtime; int r_size = WIDTH*WIDTH*sizeof(unsigned long long); unsigned long long* runtime = (unsigned long long*)malloc(r_size); memset(runtime, 0, r_size); hipMalloc((void**)&d_runtime, r_size); #endif // <-- // TODO : Kernel Invocation // Assign as many threads as the size of matrix in a thread block and // invoke the kernel function. // --> dim3 blocksPerGrid(4,4);// FILL HERE dim3 threadsPerBlock(8,8);// FILL HERE printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // MatrixMul(d_A, d_B, d_C); hipLaunchKernelGGL(( MatrixMul) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_runtime); // <-- err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); // Copy the device result matrix in device memory to the host result matrix // in host memory. 
printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); // Verify that the result matrix is correct bool res = 1; printf("h_C: "); for (int i = 0; i < WIDTH*WIDTH; i++){ printf("%0.2f ",i, h_C[i]); } printf("\n"); printf("ref: "); for (int i = 0; i < WIDTH*WIDTH; i++){ printf("%0.2f ",i, reference[i]); } printf("\n"); for (int i = 0; i < WIDTH*WIDTH; i++) { float diff = fabs(reference[i] - h_C[i]); if(diff > 0.001f) { res = 0; break; } } printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED"); // TODO : Get elapsed clock cycles from device to host // Take the longest time as kernel execution time // --> #ifdef TM hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); unsigned long long elapsed_time = 0; for(int i = 0; i < WIDTH*WIDTH; i++) if(elapsed_time < runtime[i]) elapsed_time = runtime[i]; printf("Kernel Execution Time: %llu cycles\n", elapsed_time); #endif // <-- // TODO : Free device global memory // Leave/Remove the given hipFree statement according to your data allocation // --> hipFree(d_A); hipFree(d_B); hipFree(d_C); #ifdef TM hipFree(d_runtime); #endif // <-- // Free host memory free(h_A); free(h_B); free(h_C); free(reference); #ifdef TM free(runtime); #endif return 0; } void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } }
082dc0b61b5d79fcc5b9ec9058072af9969adfd4.cu
// Matrix Multiplication in CUDA #include <stdio.h> //#include <string.h> //#include <assert.h> //#include <stdlib.h> #include <cuda_runtime.h> // includes, project //////////////////////////////////////////////////////////////////////////////// // declarations, forward #define WIDTH 32 #define THREAD_BLOCK_WIDTH 4 #define THREAD_WIDTH 8 extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); // FILL HERE: define constant variable // MatrixMul kernel /** * CUDA Kernel Device code * * Computes the matrix multiplication of A and B into C. The 3 matrices have the same * number of elements WIDTH*WIDTH. */ // FILL HERE: translate C-version matrixMul to CUDA-version kernel code __global__ void MatrixMul(float* A, float* B, float* C, unsigned long long* runtime) { // TODO : Kernel Function // C = A * B // --> unsigned long long start_time = clock64(); int thread_row = threadIdx.y; int thread_col = threadIdx.x; int blk_col_offset = blockIdx.x; int blk_row_offset = blockIdx.y; float lC= 0.0; __shared__ float sA[THREAD_WIDTH][THREAD_WIDTH]; __shared__ float sB[THREAD_WIDTH][THREAD_WIDTH]; for(int k = 0; k < THREAD_BLOCK_WIDTH; k++) { // load up the whole A and B subblock for thise particulat setion sA[thread_row][thread_col]=A[((blk_row_offset*THREAD_WIDTH)+thread_row)*WIDTH + (k*THREAD_WIDTH)+thread_col]; sB[thread_row][thread_col]=B[((k*THREAD_WIDTH)+thread_row)*WIDTH + (blk_col_offset*THREAD_WIDTH)+thread_col]; // make sure all data as been copied to subblocks before continuing calculation __syncthreads(); for(int kk = 0; kk < THREAD_WIDTH;kk++) { lC += sA[thread_row][kk] * sB[kk][thread_col]; } C[((blk_row_offset*THREAD_WIDTH)+thread_row)*WIDTH + (blk_col_offset*THREAD_WIDTH) + thread_col] += lC; lC = 0.0; } unsigned long long stop_time = clock64(); runtime[(THREAD_WIDTH*blk_row_offset)+thread_col + ((THREAD_WIDTH*blk_row_offset)+thread_row)*WIDTH] = (unsigned long long)(stop_time - start_time); } /** * Host main routine 
*/ int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the matrix size to be used, and compute its size int size = WIDTH*WIDTH*sizeof(float); printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH); // Allocate the host input matrix h_A float *h_A = (float *)malloc(size); // Allocate the host input matrix h_B float *h_B = (float *)malloc(size); // Allocate the host input matrix h_C float *h_C = (float *)malloc(size); // Allocate the host matrix for compute check float *reference = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL) { fprintf(stderr, "Failed to allocate host matrices!\n"); exit(EXIT_FAILURE); } // Initialize the host input matrices for (int i = 0; i < WIDTH; ++i) { for (int j = 0; j < WIDTH; ++j) { h_A[i*WIDTH + j] = 0.01f * (float)(rand()%50); h_B[i*WIDTH + j] = 1.0f * (float)(rand()%50); } } memset(h_C, 0, size); memset(reference, 0, size); // compute the matrix multiplication on the CPU for comparison computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH); // Allocate device input matrices // TODO : Leave/Remove the given cudaMalloc code properly // --> float* d_A = NULL; err = cudaMalloc((void**)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float* d_B = NULL; err = cudaMalloc((void**)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // <-- // Allocate the device output matrix float* d_C = NULL; err = cudaMalloc((void**)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input matrix A and B in host memory to the device input matrices in // device memory // TODO : Add 
proper mem copy APIs according to the memory that matrix A and B will be stored // --> printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);// FILL HERE if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);// FILL HERE if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // <-- // copy zeroed out array into device/global memory err = cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);// FILL HERE if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // TODO : Clock Measurements // Add code to return clock cycles from kernel // --> #ifdef TM unsigned long long* d_runtime; int r_size = WIDTH*WIDTH*sizeof(unsigned long long); unsigned long long* runtime = (unsigned long long*)malloc(r_size); memset(runtime, 0, r_size); cudaMalloc((void**)&d_runtime, r_size); #endif // <-- // TODO : Kernel Invocation // Assign as many threads as the size of matrix in a thread block and // invoke the kernel function. // --> dim3 blocksPerGrid(4,4);// FILL HERE dim3 threadsPerBlock(8,8);// FILL HERE printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // MatrixMul(d_A, d_B, d_C); MatrixMul <<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_runtime); // <-- err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaThreadSynchronize(); // Copy the device result matrix in device memory to the host result matrix // in host memory. 
printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaThreadSynchronize(); // Verify that the result matrix is correct bool res = 1; printf("h_C: "); for (int i = 0; i < WIDTH*WIDTH; i++){ printf("%0.2f ",i, h_C[i]); } printf("\n"); printf("ref: "); for (int i = 0; i < WIDTH*WIDTH; i++){ printf("%0.2f ",i, reference[i]); } printf("\n"); for (int i = 0; i < WIDTH*WIDTH; i++) { float diff = fabs(reference[i] - h_C[i]); if(diff > 0.001f) { res = 0; break; } } printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED"); // TODO : Get elapsed clock cycles from device to host // Take the longest time as kernel execution time // --> #ifdef TM cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); unsigned long long elapsed_time = 0; for(int i = 0; i < WIDTH*WIDTH; i++) if(elapsed_time < runtime[i]) elapsed_time = runtime[i]; printf("Kernel Execution Time: %llu cycles\n", elapsed_time); #endif // <-- // TODO : Free device global memory // Leave/Remove the given cudaFree statement according to your data allocation // --> cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); #ifdef TM cudaFree(d_runtime); #endif // <-- // Free host memory free(h_A); free(h_B); free(h_C); free(reference); #ifdef TM free(runtime); #endif return 0; } void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } }
712d6a30120b1f940866bf8a1e843120ad686d78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #define SOFTMAX_THREADS 128 __global__ void cunn_SoftMax_updateOutput_kernel( float *output, float *input, int nframe, int dim, int stride0, int stride1) { __shared__ float buffer[SOFTMAX_THREADS+1]; float *input_k = input + blockIdx.x*dim*stride0 + blockIdx.y*stride1 + blockIdx.z; float *output_k = output + blockIdx.x*dim*stride0 + blockIdx.y*stride1 + blockIdx.z; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // max? buffer[threadIdx.x] = -FLT_MAX; for (int i=i_start; i<i_end; i+=i_step) { float z = input_k[i*stride0]; if (buffer[threadIdx.x] < z) buffer[threadIdx.x] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float max_k = -FLT_MAX; for (int i=0; i<blockDim.x; i++) { if (max_k < buffer[i]) max_k = buffer[i]; } buffer[SOFTMAX_THREADS] = max_k; } __syncthreads(); // sum? float max_k = buffer[SOFTMAX_THREADS]; buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) { float z = __expf(input_k[i*stride0]-max_k); buffer[threadIdx.x] += z; output_k[i*stride0] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[SOFTMAX_THREADS] = sum_k; } __syncthreads(); // softmax float sum_k = buffer[SOFTMAX_THREADS]; for (int i=i_start; i<i_end; i+=i_step) output_k[i*stride0] = output_k[i*stride0] / sum_k; } __global__ void cunn_SoftMax_updateGradInput_kernel( float *gradInput, float *output, float *gradOutput, int nframe, int dim, int stride0, int stride1) { __shared__ float buffer[SOFTMAX_THREADS]; float *gradInput_k = gradInput + blockIdx.x*dim*stride0 + blockIdx.y * stride1 + blockIdx.z; float *output_k = output + blockIdx.x*dim*stride0 + blockIdx.y * stride1 + blockIdx.z; float *gradOutput_k = gradOutput + blockIdx.x*dim*stride0 + blockIdx.y * stride1 + blockIdx.z; int i_start = threadIdx.x; int i_end = dim; int i_step = 
blockDim.x; // sum? buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) buffer[threadIdx.x] += gradOutput_k[i*stride0] * output_k[i*stride0]; __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[0] = sum_k; } __syncthreads(); float sum_k = buffer[0]; for (int i=i_start; i<i_end; i+=i_step) gradInput_k[i*stride0] = output_k[i*stride0] * (gradOutput_k[i*stride0] - sum_k); } void THNN_CudaSoftMax_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output) { THCUNN_assertSameGPU(state, 2, input, output); input = THCudaTensor_newContiguous(state, input); THCudaTensor_resizeAs(state, output, input); long batchSize, dim, stride0, stride1 = 1; long blocksY = 1, blocksZ = 1; if (input->nDimension == 1) { batchSize = 1; dim = input->size[0]; stride0 = 1; } else if (input->nDimension == 2) { batchSize = input->size[0]; dim = input->size[1]; stride0 = 1; } else if (input->nDimension == 3) { batchSize = 1; dim = input->size[0]; blocksY = input->size[1]; blocksZ = input->size[2]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else if (input->nDimension == 4) { batchSize = input->size[0]; dim = input->size[1]; blocksY = input->size[2]; blocksZ = input->size[3]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else { THError("1D, 2D, 3D or 4D tensor expected"); } // when possible use only 2d grid of thread blocks to stay compatible with compute capability 2.X devices. 
if (blocksY * blocksZ < 65536) { blocksY *= blocksZ; blocksZ = 1; if (input->nDimension == 3 || input->nDimension == 4) { stride0 = blocksY * blocksZ; stride1 = blocksZ; } } dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(SOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_SoftMax_updateOutput_kernel), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCudaTensor_data(state, output), THCudaTensor_data(state, input), batchSize, dim, stride0, stride1 ); THCudaCheck(hipGetLastError()); THCudaTensor_free(state, input); } void THNN_CudaSoftMax_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *output) { THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); output = THCudaTensor_newContiguous(state, output); gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, output); long batchSize, dim, stride0, stride1 = 1; long blocksY = 1, blocksZ = 1; if (gradInput->nDimension == 1) { batchSize = 1; dim = gradInput->size[0]; stride0 = 1; } else if (gradInput->nDimension == 2) { batchSize = gradInput->size[0]; dim = gradInput->size[1]; stride0 = 1; } else if (gradInput->nDimension == 3) { batchSize = 1; dim = gradInput->size[0]; blocksY = gradInput->size[1]; blocksZ = gradInput->size[2]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else if (gradInput->nDimension == 4) { batchSize = gradInput->size[0]; dim = gradInput->size[1]; blocksY = gradInput->size[2]; blocksZ = gradInput->size[3]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else { THError("1D, 2D, 3D or 4D tensor expected"); } // when possible use only 2d grid of thread blocks to stay compatible with compute capability 2.X devices. 
if (blocksY * blocksZ < 65536) { blocksY *= blocksZ; blocksZ = 1; if (input->nDimension == 3 || input->nDimension == 4) { stride0 = blocksY * blocksZ; stride1 = blocksZ; } } dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(SOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_SoftMax_updateGradInput_kernel), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCudaTensor_data(state, gradInput), THCudaTensor_data(state, output), THCudaTensor_data(state, gradOutput), batchSize, dim, stride0, stride1 ); THCudaCheck(hipGetLastError()); THCudaTensor_free(state, gradOutput); THCudaTensor_free(state, output); } #undef SOFTMAX_THREADS
712d6a30120b1f940866bf8a1e843120ad686d78.cu
#include "THCUNN.h" #include "common.h" #define SOFTMAX_THREADS 128 __global__ void cunn_SoftMax_updateOutput_kernel( float *output, float *input, int nframe, int dim, int stride0, int stride1) { __shared__ float buffer[SOFTMAX_THREADS+1]; float *input_k = input + blockIdx.x*dim*stride0 + blockIdx.y*stride1 + blockIdx.z; float *output_k = output + blockIdx.x*dim*stride0 + blockIdx.y*stride1 + blockIdx.z; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // max? buffer[threadIdx.x] = -FLT_MAX; for (int i=i_start; i<i_end; i+=i_step) { float z = input_k[i*stride0]; if (buffer[threadIdx.x] < z) buffer[threadIdx.x] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float max_k = -FLT_MAX; for (int i=0; i<blockDim.x; i++) { if (max_k < buffer[i]) max_k = buffer[i]; } buffer[SOFTMAX_THREADS] = max_k; } __syncthreads(); // sum? float max_k = buffer[SOFTMAX_THREADS]; buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) { float z = __expf(input_k[i*stride0]-max_k); buffer[threadIdx.x] += z; output_k[i*stride0] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[SOFTMAX_THREADS] = sum_k; } __syncthreads(); // softmax float sum_k = buffer[SOFTMAX_THREADS]; for (int i=i_start; i<i_end; i+=i_step) output_k[i*stride0] = output_k[i*stride0] / sum_k; } __global__ void cunn_SoftMax_updateGradInput_kernel( float *gradInput, float *output, float *gradOutput, int nframe, int dim, int stride0, int stride1) { __shared__ float buffer[SOFTMAX_THREADS]; float *gradInput_k = gradInput + blockIdx.x*dim*stride0 + blockIdx.y * stride1 + blockIdx.z; float *output_k = output + blockIdx.x*dim*stride0 + blockIdx.y * stride1 + blockIdx.z; float *gradOutput_k = gradOutput + blockIdx.x*dim*stride0 + blockIdx.y * stride1 + blockIdx.z; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // sum? 
buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) buffer[threadIdx.x] += gradOutput_k[i*stride0] * output_k[i*stride0]; __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[0] = sum_k; } __syncthreads(); float sum_k = buffer[0]; for (int i=i_start; i<i_end; i+=i_step) gradInput_k[i*stride0] = output_k[i*stride0] * (gradOutput_k[i*stride0] - sum_k); } void THNN_CudaSoftMax_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output) { THCUNN_assertSameGPU(state, 2, input, output); input = THCudaTensor_newContiguous(state, input); THCudaTensor_resizeAs(state, output, input); long batchSize, dim, stride0, stride1 = 1; long blocksY = 1, blocksZ = 1; if (input->nDimension == 1) { batchSize = 1; dim = input->size[0]; stride0 = 1; } else if (input->nDimension == 2) { batchSize = input->size[0]; dim = input->size[1]; stride0 = 1; } else if (input->nDimension == 3) { batchSize = 1; dim = input->size[0]; blocksY = input->size[1]; blocksZ = input->size[2]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else if (input->nDimension == 4) { batchSize = input->size[0]; dim = input->size[1]; blocksY = input->size[2]; blocksZ = input->size[3]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else { THError("1D, 2D, 3D or 4D tensor expected"); } // when possible use only 2d grid of thread blocks to stay compatible with compute capability 2.X devices. 
if (blocksY * blocksZ < 65536) { blocksY *= blocksZ; blocksZ = 1; if (input->nDimension == 3 || input->nDimension == 4) { stride0 = blocksY * blocksZ; stride1 = blocksZ; } } dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(SOFTMAX_THREADS); cunn_SoftMax_updateOutput_kernel<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCudaTensor_data(state, output), THCudaTensor_data(state, input), batchSize, dim, stride0, stride1 ); THCudaCheck(cudaGetLastError()); THCudaTensor_free(state, input); } void THNN_CudaSoftMax_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *output) { THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); output = THCudaTensor_newContiguous(state, output); gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, output); long batchSize, dim, stride0, stride1 = 1; long blocksY = 1, blocksZ = 1; if (gradInput->nDimension == 1) { batchSize = 1; dim = gradInput->size[0]; stride0 = 1; } else if (gradInput->nDimension == 2) { batchSize = gradInput->size[0]; dim = gradInput->size[1]; stride0 = 1; } else if (gradInput->nDimension == 3) { batchSize = 1; dim = gradInput->size[0]; blocksY = gradInput->size[1]; blocksZ = gradInput->size[2]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else if (gradInput->nDimension == 4) { batchSize = gradInput->size[0]; dim = gradInput->size[1]; blocksY = gradInput->size[2]; blocksZ = gradInput->size[3]; stride0 = blocksY * blocksZ; stride1 = blocksZ; } else { THError("1D, 2D, 3D or 4D tensor expected"); } // when possible use only 2d grid of thread blocks to stay compatible with compute capability 2.X devices. 
if (blocksY * blocksZ < 65536) { blocksY *= blocksZ; blocksZ = 1; if (input->nDimension == 3 || input->nDimension == 4) { stride0 = blocksY * blocksZ; stride1 = blocksZ; } } dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(SOFTMAX_THREADS); cunn_SoftMax_updateGradInput_kernel<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCudaTensor_data(state, gradInput), THCudaTensor_data(state, output), THCudaTensor_data(state, gradOutput), batchSize, dim, stride0, stride1 ); THCudaCheck(cudaGetLastError()); THCudaTensor_free(state, gradOutput); THCudaTensor_free(state, output); } #undef SOFTMAX_THREADS
5d6a573c58f3b10e060bf1f7d95ad91d3171a6b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @author Mark Gates @author Azzam Haidar @generated from magmablas/zlaset.cu, normal z -> d, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. 
*/ static __device__ void dlaset_full_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_D_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to dlaset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlaset_lower_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to dlaset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. 
*/ static __device__ void dlaset_upper_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void dlaset_full_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_lower_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_upper_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_upper_device(m, n, offdiag, diag, dA, ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the batched routine. 
*/ __global__ void dlaset_full_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_lower_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_upper_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the vbatched routine. */ __global__ void dlaset_full_kernel_vbatched( magma_int_t* m, magma_int_t* n, double offdiag, double diag, double **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; dlaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void dlaset_lower_kernel_vbatched( magma_int_t* m, magma_int_t* n, double offdiag, double diag, double **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; dlaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void dlaset_upper_kernel_vbatched( magma_int_t* m, magma_int_t* n, double offdiag, double diag, double **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) 
return; dlaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } /***************************************************************************//** Purpose ------- DLASET initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag DOUBLE PRECISION The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag DOUBLE PRECISION The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA DOUBLE PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laset *******************************************************************************/ extern "C" void magmablas_dlaset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlaset_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlaset_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, hipMemset is faster. // TODO: use hipMemset2D ? if ( m == ldda && MAGMA_D_EQUAL( offdiag, MAGMA_D_ZERO ) && MAGMA_D_EQUAL( diag, MAGMA_D_ZERO ) ) { size_t size = m*n; hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(double), queue->cuda_stream() ); assert( err == hipSuccess ); MAGMA_UNUSED( err ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /******************************************************************************/ extern "C" void magmablas_dlaset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( dlaset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( dlaset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else { hipLaunchKernelGGL(( dlaset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } } /******************************************************************************/ extern "C" void magmablas_dlaset_vbatched( magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n, magma_int_t* m, magma_int_t* n, double offdiag, double diag, magmaDouble_ptr dAarray[], magma_int_t* ldda, magma_int_t batchCount, magma_queue_t queue) { 
magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( max_m < 0 ) info = -2; else if ( max_n < 0 ) info = -3; //else if ( ldda < max(1,m) ) // info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( max_m == 0 || max_n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( dlaset_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( dlaset_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else { hipLaunchKernelGGL(( dlaset_full_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } }
5d6a573c58f3b10e060bf1f7d95ad91d3171a6b7.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @author Mark Gates @author Azzam Haidar @generated from magmablas/zlaset.cu, normal z -> d, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. 
*/ static __device__ void dlaset_full_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_D_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to dlaset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlaset_lower_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to dlaset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. 
*/ static __device__ void dlaset_upper_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void dlaset_full_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_lower_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_upper_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_upper_device(m, n, offdiag, diag, dA, ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the batched routine. 
*/ __global__ void dlaset_full_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_lower_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_upper_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the vbatched routine. */ __global__ void dlaset_full_kernel_vbatched( magma_int_t* m, magma_int_t* n, double offdiag, double diag, double **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; dlaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void dlaset_lower_kernel_vbatched( magma_int_t* m, magma_int_t* n, double offdiag, double diag, double **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; dlaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void dlaset_upper_kernel_vbatched( magma_int_t* m, magma_int_t* n, double offdiag, double diag, double **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) 
return; dlaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } /***************************************************************************//** Purpose ------- DLASET initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag DOUBLE PRECISION The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag DOUBLE PRECISION The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA DOUBLE PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laset *******************************************************************************/ extern "C" void magmablas_dlaset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlaset_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlaset_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, cudaMemset is faster. // TODO: use cudaMemset2D ? if ( m == ldda && MAGMA_D_EQUAL( offdiag, MAGMA_D_ZERO ) && MAGMA_D_EQUAL( diag, MAGMA_D_ZERO ) ) { size_t size = m*n; cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(double), queue->cuda_stream() ); assert( err == cudaSuccess ); MAGMA_UNUSED( err ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /******************************************************************************/ extern "C" void magmablas_dlaset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { 
return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { dlaset_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { dlaset_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else { dlaset_full_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } } /******************************************************************************/ extern "C" void magmablas_dlaset_vbatched( magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n, magma_int_t* m, magma_int_t* n, double offdiag, double diag, magmaDouble_ptr dAarray[], magma_int_t* ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( max_m < 0 ) info = -2; else if ( max_n < 0 ) info = -3; //else if ( ldda < max(1,m) ) // info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( max_m == 0 || max_n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { dlaset_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { dlaset_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else { dlaset_full_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } }
048bf899875a8787b9fddd526dba851c0ba1cd48.hip
// !!! This is a file automatically generated by hipify!!! // includes #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> //-------------Funcion sumar velocidad __global__ void sumarvelocidad(float * pdist,int * pvec,float * psum, int node) { int nvec=9; //numero de vecinos int ndist=9; //numero de funcion de distribucion int k=0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x<node){ //para que se paralelice en cada nodo if (y<nvec){ //para que se paralelice en cada vecino for(k=0;k<ndist;k++){ //para cada velocidad realizo la suma al no saber como paralelizar esta parte psum[(x*ndist+k)]+= pdist[((pvec[(x*nvec+y)])*ndist+k)]; } } } } // nodo == x //vecino == y //velocidad == k
048bf899875a8787b9fddd526dba851c0ba1cd48.cu
// includes #include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> //-------------Funcion sumar velocidad __global__ void sumarvelocidad(float * pdist,int * pvec,float * psum, int node) { int nvec=9; //numero de vecinos int ndist=9; //numero de funcion de distribucion int k=0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x<node){ //para que se paralelice en cada nodo if (y<nvec){ //para que se paralelice en cada vecino for(k=0;k<ndist;k++){ //para cada velocidad realizo la suma al no saber como paralelizar esta parte psum[(x*ndist+k)]+= pdist[((pvec[(x*nvec+y)])*ndist+k)]; } } } } // nodo == x //vecino == y //velocidad == k
c39bf148e213f4eea32ce99d8c9236a35dc1911d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/math/sequence2batch.h" namespace paddle { namespace operators { namespace math { template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index, int64_t height, int64_t width, bool is_src_index) { int idx = threadIdx.x; int idy = threadIdx.y; int id = blockIdx.x + idy * GridDimX; while (id < height) { int src_idx = is_src_index ? index[id] : id; int dst_idx = is_src_index ? 
id : index[id]; const T* src_data = src + src_idx * width; T* dst_data = dst + dst_idx * width; for (int i = idx; i < width; i += BlockDimX) { dst_data[i] = src_data[i]; } id += BlockDimY * GridDimX; } } template <typename T> class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& src, framework::Vector<size_t> index_lod, framework::Tensor& dst, bool is_src_index) { size_t* index = index_lod.cuda_data(); auto src_dims = src.dims(); auto dst_dims = dst.dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2, "The dst must be matrix with rank 2."); PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1], "The width of src and dst must be same."); auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data<T>(); auto* dst_data = dst.data<T>(); dim3 threads(128, 8); dim3 grid(8, 1); auto stream = context.stream(); hipLaunchKernelGGL(( CopyMatrixRowsKernel<T, 128, 8, 8>), dim3(grid), dim3(threads), 0, stream, src_data, dst_data, index, height, width, is_src_index); } }; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, float>; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, double>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, float>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, double>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, float>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
c39bf148e213f4eea32ce99d8c9236a35dc1911d.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/math/sequence2batch.h" namespace paddle { namespace operators { namespace math { template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index, int64_t height, int64_t width, bool is_src_index) { int idx = threadIdx.x; int idy = threadIdx.y; int id = blockIdx.x + idy * GridDimX; while (id < height) { int src_idx = is_src_index ? index[id] : id; int dst_idx = is_src_index ? 
id : index[id]; const T* src_data = src + src_idx * width; T* dst_data = dst + dst_idx * width; for (int i = idx; i < width; i += BlockDimX) { dst_data[i] = src_data[i]; } id += BlockDimY * GridDimX; } } template <typename T> class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& src, framework::Vector<size_t> index_lod, framework::Tensor& dst, bool is_src_index) { size_t* index = index_lod.cuda_data(); auto src_dims = src.dims(); auto dst_dims = dst.dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2, "The dst must be matrix with rank 2."); PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1], "The width of src and dst must be same."); auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data<T>(); auto* dst_data = dst.data<T>(); dim3 threads(128, 8); dim3 grid(8, 1); auto stream = context.stream(); CopyMatrixRowsKernel<T, 128, 8, 8><<<grid, threads, 0, stream>>>( src_data, dst_data, index, height, width, is_src_index); } }; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, float>; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, double>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, float>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, double>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, float>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
5fec4e265ce5a69821cffe2d077a2079922b7a8c.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) { auto grad_input = at::empty_like(input); if (!log_target) { TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(target) .add_input(grad) .build(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() { scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0); gpu_kernel(iter, [inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) { return (target_val > 0) ? 
scalar_t(-target_val * grad_val * inv) : scalar_t(0.0); }); }); } else { grad_input = -at::exp(target) * grad; if (reduction == at::Reduction::Mean) { grad_input /= input.numel(); } } return grad_input; } Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda(loss, input, target, weight, reduction); } Tensor& binary_cross_entropy_out_cuda(Tensor& loss, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_input(at::squeeze(input)) .add_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = ::log(input_val); scalar_t log_1_minus_input_val = ::log(one - input_val); log_input_val = ::max(log_input_val, neg_100); log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == at::Reduction::Sum) { loss_reduced = loss.sum(); } loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor grad_input = at::empty_like(input); return 
at::native::binary_cross_entropy_backward_out_cuda(grad_input, grad, input, target, weight, reduction); } Tensor& binary_cross_entropy_backward_out_cuda(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } }} // namespace at::native
5fec4e265ce5a69821cffe2d077a2079922b7a8c.cu
#include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) { auto grad_input = at::empty_like(input); if (!log_target) { TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(target) .add_input(grad) .build(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() { scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0); gpu_kernel(iter, [inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) { return (target_val > 0) ? 
scalar_t(-target_val * grad_val * inv) : scalar_t(0.0); }); }); } else { grad_input = -at::exp(target) * grad; if (reduction == at::Reduction::Mean) { grad_input /= input.numel(); } } return grad_input; } Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda(loss, input, target, weight, reduction); } Tensor& binary_cross_entropy_out_cuda(Tensor& loss, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_input(at::squeeze(input)) .add_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = std::log(input_val); scalar_t log_1_minus_input_val = std::log(one - input_val); log_input_val = std::max(log_input_val, neg_100); log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == at::Reduction::Sum) { loss_reduced = loss.sum(); } loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor grad_input = at::empty_like(input); return 
at::native::binary_cross_entropy_backward_out_cuda(grad_input, grad, input, target, weight, reduction); } Tensor& binary_cross_entropy_backward_out_cuda(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } }} // namespace at::native
e2b2e186d49ed673c0fe14c5974b7124397d21af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "TGV_GPU_core.h" #include "shared.h" /* CUDA implementation of Primal-Dual denoising method for * Total Generilized Variation (TGV)-L2 model [1] (2D case only) * * Input Parameters: * 1. Noisy image (2D) * 2. lambda - regularisation parameter * 3. parameter to control the first-order term (alpha1) * 4. parameter to control the second-order term (alpha0) * 5. Number of Chambolle-Pock (Primal-Dual) iterations * 6. Lipshitz constant (default is 12) * * Output: * Filtered/regulariaed image * * References: * [1] K. Bredies "Total Generalized Variation" */ #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS 1.0e-7 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) /********************************************************************/ /***************************2D Functions*****************************/ /********************************************************************/ __global__ void DualP_2D_kernel(float *U, float *V1, float *V2, float *P1, float *P2, int dimX, int dimY, float sigma) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ if (i == dimX-1) P1[index] += sigma*((U[j*dimX+(i-1)] - U[index]) - V1[index]); else P1[index] += sigma*((U[j*dimX+(i+1)] - U[index]) - V1[index]); if (j == dimY-1) P2[index] += sigma*((U[(j-1)*dimX+i] - U[index]) - V2[index]); else P2[index] += sigma*((U[(j+1)*dimX+i] - U[index]) - V2[index]); } return; } __global__ void ProjP_2D_kernel(float *P1, float *P2, int dimX, int dimY, float alpha1) { float grad_magn; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { grad_magn = sqrt(pow(P1[index],2) + pow(P2[index],2)); grad_magn = grad_magn/alpha1; if (grad_magn > 1.0) { P1[index] /= grad_magn; P2[index] /= grad_magn; } } return; } __global__ void DualQ_2D_kernel(float *V1, float *V2, float *Q1, float *Q2, float *Q3, int dimX, int dimY, float sigma) { float q1, q2, q11, q22; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ if (i == dimX-1) { q1 = (V1[j*dimX+(i-1)] - V1[index]); q11 = (V2[j*dimX+(i-1)] - V2[index]); } else { q1 = (V1[j*dimX+(i+1)] - V1[index]); q11 = (V2[j*dimX+(i+1)] - V2[index]); } if (j == dimY-1) { q2 = (V2[(j-1)*dimX+i] - V2[index]); q22 = (V1[(j-1)*dimX+i] - V1[index]); } else { q2 = 
V2[(j+1)*dimX+i] - V2[index]; q22 = V1[(j+1)*dimX+i] - V1[index]; } Q1[index] += sigma*(q1); Q2[index] += sigma*(q2); Q3[index] += sigma*(0.5f*(q11 + q22)); } return; } __global__ void ProjQ_2D_kernel(float *Q1, float *Q2, float *Q3, int dimX, int dimY, float alpha0) { float grad_magn; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { grad_magn = sqrt(pow(Q1[index],2) + pow(Q2[index],2) + 2*pow(Q3[index],2)); grad_magn = grad_magn/alpha0; if (grad_magn > 1.0) { Q1[index] /= grad_magn; Q2[index] /= grad_magn; Q3[index] /= grad_magn; } } return; } __global__ void DivProjP_2D_kernel(float *U, float *U0, float *P1, float *P2, int dimX, int dimY, float lambda, float tau) { float P_v1, P_v2, div; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { if (i == 0) P_v1 = P1[index]; else P_v1 = P1[index] - P1[j*dimX+(i-1)]; if (j == 0) P_v2 = P2[index]; else P_v2 = P2[index] - P2[(j-1)*dimX+i]; div = P_v1 + P_v2; U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau); } return; } __global__ void UpdV_2D_kernel(float *V1, float *V2, float *P1, float *P2, float *Q1, float *Q2, float *Q3, int dimX, int dimY, float tau) { float q1, q11, q2, q22, div1, div2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ if (i == 0) { q1 = Q1[index]; q11 = Q3[index]; } else { q1 = Q1[index] - Q1[j*dimX+(i-1)]; q11 = Q3[index] - Q3[j*dimX+(i-1)]; } if (j == 0) { q2 = Q2[index]; q22 = Q3[index]; } else { q2 = Q2[index] - Q2[(j-1)*dimX+i]; q22 = Q3[index] - Q3[(j-1)*dimX+i]; } div1 = q1 + q22; div2 = q2 + q11; V1[index] += tau*(P1[index] + div1); V2[index] += tau*(P2[index] 
+ div2); } return; } __global__ void copyIm_TGV_kernel(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void newU_kernel(float *U, float *U_old, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { U[index] = 2.0f*U[index] - U_old[index]; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ /********************* MAIN HOST FUNCTION ******************/ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ extern "C" int TGV_GPU_main(float *U0, float *U, float lambda, float alpha1, float alpha0, int iterationsNumb, float L2, int dimX, int dimY) { int dimTotal, dev = 0; CHECK(hipSetDevice(dev)); dimTotal = dimX*dimY; float *U_old, *d_U0, *d_U, *P1, *P2, *Q1, *Q2, *Q3, *V1, *V1_old, *V2, *V2_old, tau, sigma; tau = pow(L2,-0.5); sigma = pow(L2,-0.5); CHECK(hipMalloc((void**)&d_U0,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&d_U,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&U_old,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&P1,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&P2,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&Q1,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&Q2,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&Q3,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&V1,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&V2,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&V1_old,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&V2_old,dimTotal*sizeof(float))); CHECK(hipMemcpy(d_U0,U0,dimTotal*sizeof(float),hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_U,U0,dimTotal*sizeof(float),hipMemcpyHostToDevice)); /*2D case */ dim3 
dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D)); for(int n=0; n < iterationsNumb; n++) { /* Calculate Dual Variable P */ hipLaunchKernelGGL(( DualP_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, V1, V2, P1, P2, dimX, dimY, sigma); CHECK(hipDeviceSynchronize()); /*Projection onto convex set for P*/ hipLaunchKernelGGL(( ProjP_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, dimX, dimY, alpha1); CHECK(hipDeviceSynchronize()); /* Calculate Dual Variable Q */ hipLaunchKernelGGL(( DualQ_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, Q1, Q2, Q3, dimX, dimY, sigma); CHECK(hipDeviceSynchronize()); /*Projection onto convex set for Q*/ hipLaunchKernelGGL(( ProjQ_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Q1, Q2, Q3, dimX, dimY, alpha0); CHECK(hipDeviceSynchronize()); /*saving U into U_old*/ hipLaunchKernelGGL(( copyIm_TGV_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, dimX, dimY, dimTotal); CHECK(hipDeviceSynchronize()); /*adjoint operation -> divergence and projection of P*/ hipLaunchKernelGGL(( DivProjP_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, d_U0, P1, P2, dimX, dimY, lambda, tau); CHECK(hipDeviceSynchronize()); /*get updated solution U*/ hipLaunchKernelGGL(( newU_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, dimX, dimY, dimTotal); CHECK(hipDeviceSynchronize()); /*saving V into V_old*/ hipLaunchKernelGGL(( copyIm_TGV_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V1_old, dimX, dimY, dimTotal); hipLaunchKernelGGL(( copyIm_TGV_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V2, V2_old, dimX, dimY, dimTotal); CHECK(hipDeviceSynchronize()); /* upd V*/ hipLaunchKernelGGL(( UpdV_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, P1, P2, Q1, Q2, Q3, dimX, dimY, tau); CHECK(hipDeviceSynchronize()); /*get new V*/ hipLaunchKernelGGL(( newU_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V1_old, dimX, dimY, dimTotal); hipLaunchKernelGGL(( newU_kernel), 
dim3(dimGrid),dim3(dimBlock), 0, 0, V2, V2_old, dimX, dimY, dimTotal); CHECK(hipDeviceSynchronize()); } CHECK(hipMemcpy(U,d_U,dimTotal*sizeof(float),hipMemcpyDeviceToHost)); CHECK(hipFree(d_U0)); CHECK(hipFree(d_U)); CHECK(hipFree(U_old)); CHECK(hipFree(P1)); CHECK(hipFree(P2)); CHECK(hipFree(Q1)); CHECK(hipFree(Q2)); CHECK(hipFree(Q3)); CHECK(hipFree(V1)); CHECK(hipFree(V2)); CHECK(hipFree(V1_old)); CHECK(hipFree(V2_old)); return 0; }
e2b2e186d49ed673c0fe14c5974b7124397d21af.cu
/* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "TGV_GPU_core.h" #include "shared.h" /* CUDA implementation of Primal-Dual denoising method for * Total Generilized Variation (TGV)-L2 model [1] (2D case only) * * Input Parameters: * 1. Noisy image (2D) * 2. lambda - regularisation parameter * 3. parameter to control the first-order term (alpha1) * 4. parameter to control the second-order term (alpha0) * 5. Number of Chambolle-Pock (Primal-Dual) iterations * 6. Lipshitz constant (default is 12) * * Output: * Filtered/regulariaed image * * References: * [1] K. Bredies "Total Generalized Variation" */ #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS 1.0e-7 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) /********************************************************************/ /***************************2D Functions*****************************/ /********************************************************************/ __global__ void DualP_2D_kernel(float *U, float *V1, float *V2, float *P1, float *P2, int dimX, int dimY, float sigma) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ if (i == dimX-1) P1[index] += sigma*((U[j*dimX+(i-1)] - U[index]) - V1[index]); else P1[index] += sigma*((U[j*dimX+(i+1)] - U[index]) - V1[index]); if (j == dimY-1) P2[index] += sigma*((U[(j-1)*dimX+i] - U[index]) - V2[index]); else P2[index] += sigma*((U[(j+1)*dimX+i] - U[index]) - V2[index]); } return; } __global__ void ProjP_2D_kernel(float *P1, float *P2, int dimX, int dimY, float alpha1) { float grad_magn; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { grad_magn = sqrt(pow(P1[index],2) + pow(P2[index],2)); grad_magn = grad_magn/alpha1; if (grad_magn > 1.0) { P1[index] /= grad_magn; P2[index] /= grad_magn; } } return; } __global__ void DualQ_2D_kernel(float *V1, float *V2, float *Q1, float *Q2, float *Q3, int dimX, int dimY, float sigma) { float q1, q2, q11, q22; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ if (i == dimX-1) { q1 = (V1[j*dimX+(i-1)] - V1[index]); q11 = (V2[j*dimX+(i-1)] - V2[index]); } else { q1 = (V1[j*dimX+(i+1)] - V1[index]); q11 = (V2[j*dimX+(i+1)] - V2[index]); } if (j == dimY-1) { q2 = (V2[(j-1)*dimX+i] - V2[index]); q22 = (V1[(j-1)*dimX+i] - V1[index]); } else { q2 = 
V2[(j+1)*dimX+i] - V2[index]; q22 = V1[(j+1)*dimX+i] - V1[index]; } Q1[index] += sigma*(q1); Q2[index] += sigma*(q2); Q3[index] += sigma*(0.5f*(q11 + q22)); } return; } __global__ void ProjQ_2D_kernel(float *Q1, float *Q2, float *Q3, int dimX, int dimY, float alpha0) { float grad_magn; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { grad_magn = sqrt(pow(Q1[index],2) + pow(Q2[index],2) + 2*pow(Q3[index],2)); grad_magn = grad_magn/alpha0; if (grad_magn > 1.0) { Q1[index] /= grad_magn; Q2[index] /= grad_magn; Q3[index] /= grad_magn; } } return; } __global__ void DivProjP_2D_kernel(float *U, float *U0, float *P1, float *P2, int dimX, int dimY, float lambda, float tau) { float P_v1, P_v2, div; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { if (i == 0) P_v1 = P1[index]; else P_v1 = P1[index] - P1[j*dimX+(i-1)]; if (j == 0) P_v2 = P2[index]; else P_v2 = P2[index] - P2[(j-1)*dimX+i]; div = P_v1 + P_v2; U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau); } return; } __global__ void UpdV_2D_kernel(float *V1, float *V2, float *P1, float *P2, float *Q1, float *Q2, float *Q3, int dimX, int dimY, float tau) { float q1, q11, q2, q22, div1, div2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* symmetric boundary conditions (Neuman) */ if (i == 0) { q1 = Q1[index]; q11 = Q3[index]; } else { q1 = Q1[index] - Q1[j*dimX+(i-1)]; q11 = Q3[index] - Q3[j*dimX+(i-1)]; } if (j == 0) { q2 = Q2[index]; q22 = Q3[index]; } else { q2 = Q2[index] - Q2[(j-1)*dimX+i]; q22 = Q3[index] - Q3[(j-1)*dimX+i]; } div1 = q1 + q22; div2 = q2 + q11; V1[index] += tau*(P1[index] + div1); V2[index] += tau*(P2[index] 
+ div2); } return; } __global__ void copyIm_TGV_kernel(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void newU_kernel(float *U, float *U_old, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { U[index] = 2.0f*U[index] - U_old[index]; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ /********************* MAIN HOST FUNCTION ******************/ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ extern "C" int TGV_GPU_main(float *U0, float *U, float lambda, float alpha1, float alpha0, int iterationsNumb, float L2, int dimX, int dimY) { int dimTotal, dev = 0; CHECK(cudaSetDevice(dev)); dimTotal = dimX*dimY; float *U_old, *d_U0, *d_U, *P1, *P2, *Q1, *Q2, *Q3, *V1, *V1_old, *V2, *V2_old, tau, sigma; tau = pow(L2,-0.5); sigma = pow(L2,-0.5); CHECK(cudaMalloc((void**)&d_U0,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&d_U,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&U_old,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&P1,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&P2,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&Q1,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&Q2,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&Q3,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&V1,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&V2,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&V1_old,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&V2_old,dimTotal*sizeof(float))); CHECK(cudaMemcpy(d_U0,U0,dimTotal*sizeof(float),cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_U,U0,dimTotal*sizeof(float),cudaMemcpyHostToDevice)); /*2D case */ dim3 
dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D)); for(int n=0; n < iterationsNumb; n++) { /* Calculate Dual Variable P */ DualP_2D_kernel<<<dimGrid,dimBlock>>>(d_U, V1, V2, P1, P2, dimX, dimY, sigma); CHECK(cudaDeviceSynchronize()); /*Projection onto convex set for P*/ ProjP_2D_kernel<<<dimGrid,dimBlock>>>(P1, P2, dimX, dimY, alpha1); CHECK(cudaDeviceSynchronize()); /* Calculate Dual Variable Q */ DualQ_2D_kernel<<<dimGrid,dimBlock>>>(V1, V2, Q1, Q2, Q3, dimX, dimY, sigma); CHECK(cudaDeviceSynchronize()); /*Projection onto convex set for Q*/ ProjQ_2D_kernel<<<dimGrid,dimBlock>>>(Q1, Q2, Q3, dimX, dimY, alpha0); CHECK(cudaDeviceSynchronize()); /*saving U into U_old*/ copyIm_TGV_kernel<<<dimGrid,dimBlock>>>(d_U, U_old, dimX, dimY, dimTotal); CHECK(cudaDeviceSynchronize()); /*adjoint operation -> divergence and projection of P*/ DivProjP_2D_kernel<<<dimGrid,dimBlock>>>(d_U, d_U0, P1, P2, dimX, dimY, lambda, tau); CHECK(cudaDeviceSynchronize()); /*get updated solution U*/ newU_kernel<<<dimGrid,dimBlock>>>(d_U, U_old, dimX, dimY, dimTotal); CHECK(cudaDeviceSynchronize()); /*saving V into V_old*/ copyIm_TGV_kernel<<<dimGrid,dimBlock>>>(V1, V1_old, dimX, dimY, dimTotal); copyIm_TGV_kernel<<<dimGrid,dimBlock>>>(V2, V2_old, dimX, dimY, dimTotal); CHECK(cudaDeviceSynchronize()); /* upd V*/ UpdV_2D_kernel<<<dimGrid,dimBlock>>>(V1, V2, P1, P2, Q1, Q2, Q3, dimX, dimY, tau); CHECK(cudaDeviceSynchronize()); /*get new V*/ newU_kernel<<<dimGrid,dimBlock>>>(V1, V1_old, dimX, dimY, dimTotal); newU_kernel<<<dimGrid,dimBlock>>>(V2, V2_old, dimX, dimY, dimTotal); CHECK(cudaDeviceSynchronize()); } CHECK(cudaMemcpy(U,d_U,dimTotal*sizeof(float),cudaMemcpyDeviceToHost)); CHECK(cudaFree(d_U0)); CHECK(cudaFree(d_U)); CHECK(cudaFree(U_old)); CHECK(cudaFree(P1)); CHECK(cudaFree(P2)); CHECK(cudaFree(Q1)); CHECK(cudaFree(Q2)); CHECK(cudaFree(Q3)); CHECK(cudaFree(V1)); CHECK(cudaFree(V2)); CHECK(cudaFree(V1_old)); CHECK(cudaFree(V2_old)); return 0; }
c21e8618034506b9bef14e1526d1e6f82aacf9f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Definition Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. 
Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. */ #include <stdio.h> #define REDUCE(a, b) (make_float2(fmaxf(a.x,b.x),fminf(a.y,b.y))) #define READ_AND_MAP(i) (make_float2(data[i],data[i])) #define WORKGROUP_SIZE 256 /** * \brief max_min_global_stage1: Look for the maximum an the minimum of an array. stage1 * * optimal workgroup size: 2^n greater than sqrt(SIZE), limited to 512 * optimal total item size: (workgroup size)^2 * if SIZE >total item size: adjust seq_count. 
* * @param data: Float pointer to global memory storing the vector of data. * @param out: Float2 pointer to global memory storing the temporary results (workgroup size) * @param seq_count: how many blocksize each thread should read * @param SIZE: size of the * **/ __global__ void max_min_stage1( const float *data, float2 *out, unsigned int SIZE){ __shared__ float2 ldata[WORKGROUP_SIZE]; unsigned int group_size = min((unsigned int) blockDim.x, (unsigned int) WORKGROUP_SIZE); unsigned int lid = threadIdx.x; float2 acc; unsigned int big_block = group_size * gridDim.x; unsigned int i = lid + group_size * blockIdx.x; if (lid<SIZE) acc = READ_AND_MAP(lid); else acc = READ_AND_MAP(0); while (i<SIZE){ acc = REDUCE(acc, READ_AND_MAP(i)); i += big_block; //get_global_size(0); } ldata[lid] = acc; __syncthreads(); if ((lid<group_size) && (lid < 512) && ((lid + 512)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 512]); } __syncthreads(); if ((lid<group_size) && (lid < 256) && ((lid + 256)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 256]); } __syncthreads(); if ((lid<group_size) && (lid < 128) && ((lid + 128)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 128]); } __syncthreads(); if ((lid<group_size) && (lid < 64 ) && ((lid + 64 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 64 ]); } __syncthreads(); if ((lid<group_size) && (lid < 32 ) && ((lid + 32 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 32 ]); } __syncthreads(); if ((lid<group_size) && (lid < 16 ) && ((lid + 16 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 16 ]); } __syncthreads(); if ((lid<group_size) && (lid < 8 ) && ((lid + 8 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 8 ]); } __syncthreads(); if ((lid<group_size) && (lid < 4 ) && ((lid + 4 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 4 ]); } __syncthreads(); if ((lid<group_size) && (lid < 2 ) && ((lid + 2 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], 
ldata[lid + 2 ]); } __syncthreads(); if ((lid<group_size) && (lid < 1 ) && ((lid + 1 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 1 ]); } __syncthreads(); out[blockIdx.x] = ldata[0]; } /** * \brief global_max_min: Look for the maximum an the minimum of an array. * * * * @param data2: Float2 pointer to global memory storing the vector of pre-reduced data (workgroup size). * @param maximum: Float pointer to global memory storing the maximum value * @param minumum: Float pointer to global memory storing the minimum value * **/ __global__ void max_min_stage2( const float2 *data2, float *maximum, float *minimum){ __shared__ float2 ldata[WORKGROUP_SIZE]; unsigned int lid = threadIdx.x; unsigned int group_size = min((unsigned int) blockDim.x, (unsigned int) WORKGROUP_SIZE); float2 acc; //= make_float2(-1.0f, -1.0f); if (lid<=group_size){ ldata[lid] = data2[lid]; };//else{ // ldata[lid] = acc; //} __syncthreads(); if ((lid<group_size) && (lid < 512) && ((lid + 512)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 512]); } __syncthreads(); if ((lid<group_size) && (lid < 256) && ((lid + 256)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 256]); } __syncthreads(); if ((lid<group_size) && (lid < 128) && ((lid + 128)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 128]); } __syncthreads(); if ((lid<group_size) && (lid < 64 ) && ((lid + 64 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 64 ]); } __syncthreads(); if ((lid<group_size) && (lid < 32 ) && ((lid + 32 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 32 ]); } __syncthreads(); if ((lid<group_size) && (lid < 16 ) && ((lid + 16 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 16 ]); } __syncthreads(); if ((lid<group_size) && (lid < 8 ) && ((lid + 8 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 8 ]); } __syncthreads(); if ((lid<group_size) && (lid < 4 ) && ((lid + 4 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 4 ]); } 
__syncthreads(); if ((lid<group_size) && (lid < 2 ) && ((lid + 2 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 2 ]); } __syncthreads(); if (lid == 0 ){ if ( 1 < group_size){ acc = REDUCE(ldata[0], ldata[1]); }else{ acc = ldata[0]; } maximum[0] = acc.x; minimum[0] = acc.y; } } /** * \brief histogram: calculate the histogram of an image * bin = (lum[i] - lumMin) / lumRange * numBins * * * @param data: Float pointer to global memory storing the log of the luminance. * @param maximum: Float pointer to global memory storing the maximum value * @param minumum: Float pointer to global memory storing the minimum value * **/ __global__ void histogram(const float* const lum, float* lumMin, float* lumMax, unsigned int* bins, int numBins, unsigned int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx>=SIZE) return; int bin = (lum[idx] - lumMin[0]) / (lumMax[0] - lumMin[0]) * numBins; atomicAdd(&bins[bin],1); } /** * \brief blelloch1: Exclusive scan phase 1: reduction like perent mode, * * * * @param data: Integer pointer to global memory storing the data: modified in place. 
* @param d: Integer: scale at which we are working * @param SIZE: integer representing the size of the array * **/ __global__ void blelloch1( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ((idx+step<=SIZE) && (idx % step) == 0 ){ data[idx+step-1] += data[idx + step/2 - 1]; } } __global__ void blelloch2( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == 0) data[SIZE-1] = data[SIZE/2-1]; } __global__ void blelloch3( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == 0) data[SIZE/2-1] = 0; } __global__ void blelloch4( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ((idx+step<=SIZE) && ((idx % step) == 0 )){ unsigned int temp = data[idx + step/2 - 1]; data[idx + step/2 -1] = data[idx + step - 1]; data[idx + step - 1] += temp; } } #include "utils.h" void print_cuda_array(unsigned int* d_data, size_t size){ unsigned int *h_data; h_data = (unsigned int *)malloc(sizeof(unsigned int)*size); hipMemcpy(h_data, d_data, sizeof(unsigned int) * size, hipMemcpyDeviceToHost); for (int i=0; i<size; i++){ // if (i%10==39) // printf("%d\n",h_data[i]); // else printf("%d ",h_data[i]); } printf("\n"); free(h_data); } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer 
which already has been allocated for you) */ //Malloc stuff: float2 *d_data2; float *d_min, *d_max; checkCudaErrors(hipMalloc(&d_min, (size_t) sizeof(float))); checkCudaErrors(hipMalloc(&d_max, (size_t) sizeof(float))); //1. get maximum and minimum of logLuminance channel. int image_size = numRows*numCols; float wg_float = fminf((float) WORKGROUP_SIZE, sqrtf((float)image_size)); int red_size = pow(2, (int)ceil(logf(wg_float)/logf(2.0f))); int memory = sizeof(float) * 2 * red_size; //temporary storage for reduction checkCudaErrors(hipMalloc(&d_data2, (size_t)memory)); hipLaunchKernelGGL(( max_min_stage1), dim3(red_size), dim3(red_size), 0, 0, d_logLuminance, d_data2, image_size); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( max_min_stage2), dim3(1), dim3(red_size), 0, 0, d_data2, d_max, d_min ); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); float mmin[1]; float mmax[1]; hipMemcpy(mmin, d_min, sizeof(float) * 1, hipMemcpyDeviceToHost); hipMemcpy(mmax, d_max, sizeof(float) * 1, hipMemcpyDeviceToHost); printf( "CUDA Min: %f Max: %f\n",mmin[0],mmax[0]); min_logLum = mmin[0]; max_logLum = mmax[0]; // 3) generate a histogram of all the values in the logLuminance channel using // the formula: bin = (lum[i] - lumMin) / lumRange * numBins hipLaunchKernelGGL(( histogram), dim3(((image_size+31)/32)),dim3(32), 0, 0, d_logLuminance, d_min, d_max, d_cdf, numBins, image_size); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); print_cuda_array(d_cdf,numBins); // 4) Perform an exclusive scan (prefix sum) on the histogram to get // the cumulative distribution of luminance values (this should go in the // incoming d_cdf pointer which already has been allocated for you) */ int dmax = (int) ceil(log(1.0*numBins)/log(2.0)); printf( "numBins= %d; dmax=%d \n",numBins,dmax); for (int d=0; d<(dmax-1); d++){ printf( "CUDA blelloch1 d= %d/%d (%d)\n",d,dmax,1<<(d+1)); hipLaunchKernelGGL(( blelloch1), 
dim3(((numBins+WORKGROUP_SIZE-1)/WORKGROUP_SIZE)),dim3(WORKGROUP_SIZE), 0, 0, d_cdf, 1<<(d+1), numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } print_cuda_array(d_cdf,numBins); hipLaunchKernelGGL(( blelloch2), dim3(1),dim3(1), 0, 0, d_cdf, 1, numBins); hipLaunchKernelGGL(( blelloch3), dim3(1),dim3(1), 0, 0, d_cdf, 1, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); for (int d=dmax-2;d>=0; d--){ printf( "CUDA blelloch2 d= %d/%d (%d)\n",d,dmax,1<<(d+1)); hipLaunchKernelGGL(( blelloch4), dim3(((numBins+WORKGROUP_SIZE-1)/WORKGROUP_SIZE)),dim3(WORKGROUP_SIZE), 0, 0, d_cdf, 1<<(d+1), numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } print_cuda_array(d_cdf,numBins); // Free memory checkCudaErrors(hipFree(d_data2)); checkCudaErrors(hipFree(d_max)); checkCudaErrors(hipFree(d_min)); }
c21e8618034506b9bef14e1526d1e6f82aacf9f9.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Definition Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. 
We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. */ #include <stdio.h> #define REDUCE(a, b) (make_float2(fmaxf(a.x,b.x),fminf(a.y,b.y))) #define READ_AND_MAP(i) (make_float2(data[i],data[i])) #define WORKGROUP_SIZE 256 /** * \brief max_min_global_stage1: Look for the maximum an the minimum of an array. stage1 * * optimal workgroup size: 2^n greater than sqrt(SIZE), limited to 512 * optimal total item size: (workgroup size)^2 * if SIZE >total item size: adjust seq_count. * * @param data: Float pointer to global memory storing the vector of data. 
* @param out: Float2 pointer to global memory storing the temporary results (workgroup size) * @param seq_count: how many blocksize each thread should read * @param SIZE: size of the * **/ __global__ void max_min_stage1( const float *data, float2 *out, unsigned int SIZE){ __shared__ float2 ldata[WORKGROUP_SIZE]; unsigned int group_size = min((unsigned int) blockDim.x, (unsigned int) WORKGROUP_SIZE); unsigned int lid = threadIdx.x; float2 acc; unsigned int big_block = group_size * gridDim.x; unsigned int i = lid + group_size * blockIdx.x; if (lid<SIZE) acc = READ_AND_MAP(lid); else acc = READ_AND_MAP(0); while (i<SIZE){ acc = REDUCE(acc, READ_AND_MAP(i)); i += big_block; //get_global_size(0); } ldata[lid] = acc; __syncthreads(); if ((lid<group_size) && (lid < 512) && ((lid + 512)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 512]); } __syncthreads(); if ((lid<group_size) && (lid < 256) && ((lid + 256)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 256]); } __syncthreads(); if ((lid<group_size) && (lid < 128) && ((lid + 128)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 128]); } __syncthreads(); if ((lid<group_size) && (lid < 64 ) && ((lid + 64 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 64 ]); } __syncthreads(); if ((lid<group_size) && (lid < 32 ) && ((lid + 32 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 32 ]); } __syncthreads(); if ((lid<group_size) && (lid < 16 ) && ((lid + 16 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 16 ]); } __syncthreads(); if ((lid<group_size) && (lid < 8 ) && ((lid + 8 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 8 ]); } __syncthreads(); if ((lid<group_size) && (lid < 4 ) && ((lid + 4 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 4 ]); } __syncthreads(); if ((lid<group_size) && (lid < 2 ) && ((lid + 2 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 2 ]); } __syncthreads(); if ((lid<group_size) && (lid < 1 ) && ((lid + 
1 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 1 ]); } __syncthreads(); out[blockIdx.x] = ldata[0]; } /** * \brief global_max_min: Look for the maximum an the minimum of an array. * * * * @param data2: Float2 pointer to global memory storing the vector of pre-reduced data (workgroup size). * @param maximum: Float pointer to global memory storing the maximum value * @param minumum: Float pointer to global memory storing the minimum value * **/ __global__ void max_min_stage2( const float2 *data2, float *maximum, float *minimum){ __shared__ float2 ldata[WORKGROUP_SIZE]; unsigned int lid = threadIdx.x; unsigned int group_size = min((unsigned int) blockDim.x, (unsigned int) WORKGROUP_SIZE); float2 acc; //= make_float2(-1.0f, -1.0f); if (lid<=group_size){ ldata[lid] = data2[lid]; };//else{ // ldata[lid] = acc; //} __syncthreads(); if ((lid<group_size) && (lid < 512) && ((lid + 512)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 512]); } __syncthreads(); if ((lid<group_size) && (lid < 256) && ((lid + 256)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 256]); } __syncthreads(); if ((lid<group_size) && (lid < 128) && ((lid + 128)<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 128]); } __syncthreads(); if ((lid<group_size) && (lid < 64 ) && ((lid + 64 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 64 ]); } __syncthreads(); if ((lid<group_size) && (lid < 32 ) && ((lid + 32 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 32 ]); } __syncthreads(); if ((lid<group_size) && (lid < 16 ) && ((lid + 16 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 16 ]); } __syncthreads(); if ((lid<group_size) && (lid < 8 ) && ((lid + 8 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 8 ]); } __syncthreads(); if ((lid<group_size) && (lid < 4 ) && ((lid + 4 )<group_size)){ ldata[lid] = REDUCE(ldata[lid], ldata[lid + 4 ]); } __syncthreads(); if ((lid<group_size) && (lid < 2 ) && ((lid + 2 )<group_size)){ 
ldata[lid] = REDUCE(ldata[lid], ldata[lid + 2 ]); } __syncthreads(); if (lid == 0 ){ if ( 1 < group_size){ acc = REDUCE(ldata[0], ldata[1]); }else{ acc = ldata[0]; } maximum[0] = acc.x; minimum[0] = acc.y; } } /** * \brief histogram: calculate the histogram of an image * bin = (lum[i] - lumMin) / lumRange * numBins * * * @param data: Float pointer to global memory storing the log of the luminance. * @param maximum: Float pointer to global memory storing the maximum value * @param minumum: Float pointer to global memory storing the minimum value * **/ __global__ void histogram(const float* const lum, float* lumMin, float* lumMax, unsigned int* bins, int numBins, unsigned int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx>=SIZE) return; int bin = (lum[idx] - lumMin[0]) / (lumMax[0] - lumMin[0]) * numBins; atomicAdd(&bins[bin],1); } /** * \brief blelloch1: Exclusive scan phase 1: reduction like perent mode, * * * * @param data: Integer pointer to global memory storing the data: modified in place. 
* @param d: Integer: scale at which we are working * @param SIZE: integer representing the size of the array * **/ __global__ void blelloch1( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ((idx+step<=SIZE) && (idx % step) == 0 ){ data[idx+step-1] += data[idx + step/2 - 1]; } } __global__ void blelloch2( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == 0) data[SIZE-1] = data[SIZE/2-1]; } __global__ void blelloch3( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == 0) data[SIZE/2-1] = 0; } __global__ void blelloch4( unsigned int* data, int step, int SIZE) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ((idx+step<=SIZE) && ((idx % step) == 0 )){ unsigned int temp = data[idx + step/2 - 1]; data[idx + step/2 -1] = data[idx + step - 1]; data[idx + step - 1] += temp; } } #include "utils.h" void print_cuda_array(unsigned int* d_data, size_t size){ unsigned int *h_data; h_data = (unsigned int *)malloc(sizeof(unsigned int)*size); cudaMemcpy(h_data, d_data, sizeof(unsigned int) * size, cudaMemcpyDeviceToHost); for (int i=0; i<size; i++){ // if (i%10==39) // printf("%d\n",h_data[i]); // else printf("%d ",h_data[i]); } printf("\n"); free(h_data); } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf 
pointer which already has been allocated for you) */ //Malloc stuff: float2 *d_data2; float *d_min, *d_max; checkCudaErrors(cudaMalloc(&d_min, (size_t) sizeof(float))); checkCudaErrors(cudaMalloc(&d_max, (size_t) sizeof(float))); //1. get maximum and minimum of logLuminance channel. int image_size = numRows*numCols; float wg_float = fminf((float) WORKGROUP_SIZE, sqrtf((float)image_size)); int red_size = pow(2, (int)ceil(logf(wg_float)/logf(2.0f))); int memory = sizeof(float) * 2 * red_size; //temporary storage for reduction checkCudaErrors(cudaMalloc(&d_data2, (size_t)memory)); max_min_stage1<<<red_size, red_size>>>(d_logLuminance, d_data2, image_size); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); max_min_stage2<<<1, red_size>>>(d_data2, d_max, d_min ); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); float mmin[1]; float mmax[1]; cudaMemcpy(mmin, d_min, sizeof(float) * 1, cudaMemcpyDeviceToHost); cudaMemcpy(mmax, d_max, sizeof(float) * 1, cudaMemcpyDeviceToHost); printf( "CUDA Min: %f Max: %f\n",mmin[0],mmax[0]); min_logLum = mmin[0]; max_logLum = mmax[0]; // 3) generate a histogram of all the values in the logLuminance channel using // the formula: bin = (lum[i] - lumMin) / lumRange * numBins histogram<<<((image_size+31)/32),32>>>(d_logLuminance, d_min, d_max, d_cdf, numBins, image_size); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); print_cuda_array(d_cdf,numBins); // 4) Perform an exclusive scan (prefix sum) on the histogram to get // the cumulative distribution of luminance values (this should go in the // incoming d_cdf pointer which already has been allocated for you) */ int dmax = (int) ceil(log(1.0*numBins)/log(2.0)); printf( "numBins= %d; dmax=%d \n",numBins,dmax); for (int d=0; d<(dmax-1); d++){ printf( "CUDA blelloch1 d= %d/%d (%d)\n",d,dmax,1<<(d+1)); blelloch1<<<((numBins+WORKGROUP_SIZE-1)/WORKGROUP_SIZE),WORKGROUP_SIZE>>>(d_cdf, 1<<(d+1), numBins); cudaDeviceSynchronize(); 
checkCudaErrors(cudaGetLastError()); } print_cuda_array(d_cdf,numBins); blelloch2<<<1,1>>>(d_cdf, 1, numBins); blelloch3<<<1,1>>>(d_cdf, 1, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); for (int d=dmax-2;d>=0; d--){ printf( "CUDA blelloch2 d= %d/%d (%d)\n",d,dmax,1<<(d+1)); blelloch4<<<((numBins+WORKGROUP_SIZE-1)/WORKGROUP_SIZE),WORKGROUP_SIZE>>>(d_cdf, 1<<(d+1), numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } print_cuda_array(d_cdf,numBins); // Free memory checkCudaErrors(cudaFree(d_data2)); checkCudaErrors(cudaFree(d_max)); checkCudaErrors(cudaFree(d_min)); }
53c97d3d50bcf69c66b8eb5e9ef17ee2920de06f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void FilmGradeKernelD( float* p_Input, float* p_Output, int p_Width, int p_Height, float p_Pivot, int p_Display) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; float height = p_Height; float width = p_Width; float X = x; float Y = y; const float RES = width / 1920.0f; float overlay = 0.0f; if (x < p_Width && y < p_Height) { const int index = (y * p_Width + x) * 4; if (p_Display == 1) { overlay = Y / height >= p_Pivot && Y / height <= p_Pivot + 0.005f * RES ? (fmodf(X, 2.0f) != 0.0f ? 1.0f : 0.0f) : p_Output[index] >= (Y - 5.0f * RES) / height && p_Output[index] <= (Y + 5.0f * RES) / height ? 1.0f : 0.0f; p_Output[index] = overlay; } if (p_Display == 2) { overlay = Y / height >= p_Pivot && Y / height <= p_Pivot + 0.005f * RES ? (fmodf(X, 2.0f) != 0.0f ? 1.0f : 0.0f) : p_Input[index] >= (Y - 5.0f * RES) / height && p_Input[index] <= (Y + 5.0f * RES) / height ? 1.0f : 0.0f; p_Output[index] = overlay == 0.0f ? p_Output[index] : overlay; }}}
53c97d3d50bcf69c66b8eb5e9ef17ee2920de06f.cu
#include "includes.h" __global__ void FilmGradeKernelD( float* p_Input, float* p_Output, int p_Width, int p_Height, float p_Pivot, int p_Display) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; float height = p_Height; float width = p_Width; float X = x; float Y = y; const float RES = width / 1920.0f; float overlay = 0.0f; if (x < p_Width && y < p_Height) { const int index = (y * p_Width + x) * 4; if (p_Display == 1) { overlay = Y / height >= p_Pivot && Y / height <= p_Pivot + 0.005f * RES ? (fmodf(X, 2.0f) != 0.0f ? 1.0f : 0.0f) : p_Output[index] >= (Y - 5.0f * RES) / height && p_Output[index] <= (Y + 5.0f * RES) / height ? 1.0f : 0.0f; p_Output[index] = overlay; } if (p_Display == 2) { overlay = Y / height >= p_Pivot && Y / height <= p_Pivot + 0.005f * RES ? (fmodf(X, 2.0f) != 0.0f ? 1.0f : 0.0f) : p_Input[index] >= (Y - 5.0f * RES) / height && p_Input[index] <= (Y + 5.0f * RES) / height ? 1.0f : 0.0f; p_Output[index] = overlay == 0.0f ? p_Output[index] : overlay; }}}
78a97619302d0c65744835ab97695bad304c6399.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zgeadd_batched.cu normal z -> s, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* ===================================================================== Batches slacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void sgeadd_batched_kernel( int m, int n, float alpha, const float * const *dAarray, int ldda, float **dBarray, int lddb ) { // dA and dB iterate across row i const float *dA = dAarray[ blockIdx.y ]; float *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const float *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Purpose ------- ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] alpha REAL The scalar alpha. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a REAL array, dimension (LDDA,N) The m by n matrices dAarray[i]. 
@param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[in,out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a REAL array, dimension (LDDB,N) The m by n matrices dBarray[i]. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd_batched_q( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr const dAarray[], magma_int_t ldda, magmaFloat_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); hipLaunchKernelGGL(( sgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue , m, n, alpha, dAarray, ldda, dBarray, lddb ); } /** @see magmablas_sgeadd_batched_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd_batched( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr const dAarray[], magma_int_t ldda, magmaFloat_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount ) { magmablas_sgeadd_batched_q( m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream ); }
78a97619302d0c65744835ab97695bad304c6399.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zgeadd_batched.cu normal z -> s, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* ===================================================================== Batches slacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void sgeadd_batched_kernel( int m, int n, float alpha, const float * const *dAarray, int ldda, float **dBarray, int lddb ) { // dA and dB iterate across row i const float *dA = dAarray[ blockIdx.y ]; float *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const float *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Purpose ------- ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] alpha REAL The scalar alpha. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a REAL array, dimension (LDDA,N) The m by n matrices dAarray[i]. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). 
@param[in,out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a REAL array, dimension (LDDB,N) The m by n matrices dBarray[i]. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd_batched_q( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr const dAarray[], magma_int_t ldda, magmaFloat_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); sgeadd_batched_kernel<<< grid, threads, 0, queue >>>( m, n, alpha, dAarray, ldda, dBarray, lddb ); } /** @see magmablas_sgeadd_batched_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd_batched( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr const dAarray[], magma_int_t ldda, magmaFloat_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount ) { magmablas_sgeadd_batched_q( m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream ); }
d6ce1bd43aee55a4961eaf28a36733e97eba00c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <starpu.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "convolution_core.h" template <typename T> __global__ void convolution_kernel( const T *d_f, // Padded matrix const unsigned int paddedN, // N+r const unsigned int paddedM, // M+r const T *d_g, // kernel const int r, // radius T *d_h, // output const unsigned int N, // N const unsigned int M // M ) { // Set the padding size and filter size unsigned int paddingSize = r; unsigned int filterSize = 2 * r + 1; // Set the pixel coordinate. // Threads in the padding size wont do anything. const unsigned int j = blockIdx.x * blockDim.x + threadIdx.x + paddingSize; const unsigned int i = blockIdx.y * blockDim.y + threadIdx.y + paddingSize; // The multiply-add operation for the pixel coordinate ( j, i ) if( j >= paddingSize && j < paddedN - paddingSize && i >= paddingSize && i < paddedM - paddingSize ) { unsigned int oPixelPos = ( i - paddingSize ) * N + ( j - paddingSize ); d_h[oPixelPos] = 0.0; for( int k = -r; k <=r; k++ ) { for( int l = -r; l <= r; l++ ) { unsigned int iPixelPos = ( i + k ) * paddedN + ( j + l ); unsigned int coefPos = ( k + r ) * filterSize + ( l + r ); d_h[oPixelPos] += d_f[iPixelPos] * d_g[coefPos]; } } } } inline unsigned int iDivUp( const unsigned int &a, const unsigned int &b ) { return ( a%b != 0 ) ? (a/b+1):(a/b); } #if 0 extern "C++" void compute_convolution_gpu( vector< TestFunction<float>* > &subi, TestKernel<float> &g, vector< TestFunction<float>* > &subo, int M, int N ) { int r = g.radius; printf("[GPU]: Compute convolution ... 
\n"); // Allocate the memory on a device (corresponding to a smaller conv_matrix) // ---------------------------------------------------------------------------- float *d_f = NULL; unsigned int paddedMatrixSizeByte = subi[0]->get_mem_size(); hipMalloc( reinterpret_cast<void **>(&d_f), paddedMatrixSizeByte ); float *d_h = NULL; unsigned int imageSizeByte = subo[0]->get_mem_size(); hipMalloc( reinterpret_cast<void **>(&d_h), imageSizeByte ); float *d_g = NULL; unsigned int filterKernelSizeByte = g.get_mem_size(); hipMalloc( reinterpret_cast<void **>(&d_g), filterKernelSizeByte ); float *h_g = g.data; // Kernel hipMemcpy( d_g, h_g, filterKernelSizeByte, hipMemcpyHostToDevice ); // Host to Device // Setting the execution configuration // ---------------------------------------------------------------------------- const unsigned int blockN = 32; const unsigned int blockM = 32; const dim3 grid( iDivUp( N, blockN ), iDivUp( M, blockM ) ); const dim3 threadBlock( blockN, blockM ); printf("Convolution GPU tasks ...\n"); for (int i=0; i<subi.size(); i++) { float *h_f = subi[i]->data; // Input float *h_h = subo[i]->data; // Output // Transfer from a host to a device hipMemcpy(d_f, h_f, paddedMatrixSizeByte, hipMemcpyHostToDevice ); // Host to Device // Convolve: call cuda kernel hipLaunchKernelGGL(( convolution_kernel), dim3(grid),dim3(threadBlock), 0, 0, d_f, subi[i]->x_num, subi[i]->y_num, d_g, r, d_h, subo[i]->x_num, subo[i]->y_num); hipDeviceSynchronize(); // Transfer result from the device to the host hipMemcpy( h_h, d_h, imageSizeByte, hipMemcpyDeviceToHost ); // Device to Host } } #endif extern "C++" void compute_convolution_gpu_func(void *buffers[], void *cl_arg) { float *fo, *fi, *fk; size_t no, mo, ni, mi, nk; int M, N; starpu_codelet_unpack_args(cl_arg, &M, &N); // These are cuda pointers fo = (float*)STARPU_MATRIX_GET_PTR(buffers[0]); no = (unsigned)STARPU_MATRIX_GET_NX(buffers[0]); mo = (unsigned)STARPU_MATRIX_GET_NY(buffers[0]); fi = 
(float*)STARPU_MATRIX_GET_PTR(buffers[1]); ni = (unsigned)STARPU_MATRIX_GET_NX(buffers[1]); mi = (unsigned)STARPU_MATRIX_GET_NY(buffers[1]); fk = (float*)STARPU_MATRIX_GET_PTR(buffers[2]); nk = (unsigned)STARPU_MATRIX_GET_NX(buffers[2]); int r = (nk-1)/2; /* printf("%d, %d,%d, %d, %d, %d, %d, %d\n", M, N, mi, ni, mo, no, nk, r) ; */ #if 0 const unsigned int blockN = 32; const unsigned int blockM = 32; const dim3 grid( iDivUp( N, blockN ), iDivUp( M, blockM ) ); const dim3 threadBlock( blockN, blockM ); // Convolve: call cuda kernel hipLaunchKernelGGL(( convolution_kernel), dim3(grid),dim3(threadBlock), starpu_cuda_get_local_stream(), 0, fi, ni, mi, fk, r, fo, no, mo); hipDeviceSynchronize(); #endif const unsigned int blockN = 32; const unsigned int blockM = 32; const dim3 grid( iDivUp( N, blockN ), iDivUp( M, blockM ) ); const dim3 threadBlock( blockN, blockM ); // No shared memory: third parameter is 0 hipLaunchKernelGGL(( convolution_kernel), dim3(grid), dim3(threadBlock), 0, starpu_cuda_get_local_stream(), fi, ni, mi, fk, r, fo, no, mo); hipStreamSynchronize(starpu_cuda_get_local_stream()); }
d6ce1bd43aee55a4961eaf28a36733e97eba00c8.cu
#include <starpu.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "convolution_core.h" template <typename T> __global__ void convolution_kernel( const T *d_f, // Padded matrix const unsigned int paddedN, // N+r const unsigned int paddedM, // M+r const T *d_g, // kernel const int r, // radius T *d_h, // output const unsigned int N, // N const unsigned int M // M ) { // Set the padding size and filter size unsigned int paddingSize = r; unsigned int filterSize = 2 * r + 1; // Set the pixel coordinate. // Threads in the padding size wont do anything. const unsigned int j = blockIdx.x * blockDim.x + threadIdx.x + paddingSize; const unsigned int i = blockIdx.y * blockDim.y + threadIdx.y + paddingSize; // The multiply-add operation for the pixel coordinate ( j, i ) if( j >= paddingSize && j < paddedN - paddingSize && i >= paddingSize && i < paddedM - paddingSize ) { unsigned int oPixelPos = ( i - paddingSize ) * N + ( j - paddingSize ); d_h[oPixelPos] = 0.0; for( int k = -r; k <=r; k++ ) { for( int l = -r; l <= r; l++ ) { unsigned int iPixelPos = ( i + k ) * paddedN + ( j + l ); unsigned int coefPos = ( k + r ) * filterSize + ( l + r ); d_h[oPixelPos] += d_f[iPixelPos] * d_g[coefPos]; } } } } inline unsigned int iDivUp( const unsigned int &a, const unsigned int &b ) { return ( a%b != 0 ) ? (a/b+1):(a/b); } #if 0 extern "C++" void compute_convolution_gpu( vector< TestFunction<float>* > &subi, TestKernel<float> &g, vector< TestFunction<float>* > &subo, int M, int N ) { int r = g.radius; printf("[GPU]: Compute convolution ... 
\n"); // Allocate the memory on a device (corresponding to a smaller conv_matrix) // ---------------------------------------------------------------------------- float *d_f = NULL; unsigned int paddedMatrixSizeByte = subi[0]->get_mem_size(); cudaMalloc( reinterpret_cast<void **>(&d_f), paddedMatrixSizeByte ); float *d_h = NULL; unsigned int imageSizeByte = subo[0]->get_mem_size(); cudaMalloc( reinterpret_cast<void **>(&d_h), imageSizeByte ); float *d_g = NULL; unsigned int filterKernelSizeByte = g.get_mem_size(); cudaMalloc( reinterpret_cast<void **>(&d_g), filterKernelSizeByte ); float *h_g = g.data; // Kernel cudaMemcpy( d_g, h_g, filterKernelSizeByte, cudaMemcpyHostToDevice ); // Host to Device // Setting the execution configuration // ---------------------------------------------------------------------------- const unsigned int blockN = 32; const unsigned int blockM = 32; const dim3 grid( iDivUp( N, blockN ), iDivUp( M, blockM ) ); const dim3 threadBlock( blockN, blockM ); printf("Convolution GPU tasks ...\n"); for (int i=0; i<subi.size(); i++) { float *h_f = subi[i]->data; // Input float *h_h = subo[i]->data; // Output // Transfer from a host to a device cudaMemcpy(d_f, h_f, paddedMatrixSizeByte, cudaMemcpyHostToDevice ); // Host to Device // Convolve: call cuda kernel convolution_kernel<<<grid,threadBlock>>>( d_f, subi[i]->x_num, subi[i]->y_num, d_g, r, d_h, subo[i]->x_num, subo[i]->y_num); cudaDeviceSynchronize(); // Transfer result from the device to the host cudaMemcpy( h_h, d_h, imageSizeByte, cudaMemcpyDeviceToHost ); // Device to Host } } #endif extern "C++" void compute_convolution_gpu_func(void *buffers[], void *cl_arg) { float *fo, *fi, *fk; size_t no, mo, ni, mi, nk; int M, N; starpu_codelet_unpack_args(cl_arg, &M, &N); // These are cuda pointers fo = (float*)STARPU_MATRIX_GET_PTR(buffers[0]); no = (unsigned)STARPU_MATRIX_GET_NX(buffers[0]); mo = (unsigned)STARPU_MATRIX_GET_NY(buffers[0]); fi = (float*)STARPU_MATRIX_GET_PTR(buffers[1]); ni = 
(unsigned)STARPU_MATRIX_GET_NX(buffers[1]); mi = (unsigned)STARPU_MATRIX_GET_NY(buffers[1]); fk = (float*)STARPU_MATRIX_GET_PTR(buffers[2]); nk = (unsigned)STARPU_MATRIX_GET_NX(buffers[2]); int r = (nk-1)/2; /* printf("%d, %d,%d, %d, %d, %d, %d, %d\n", M, N, mi, ni, mo, no, nk, r) ; */ #if 0 const unsigned int blockN = 32; const unsigned int blockM = 32; const dim3 grid( iDivUp( N, blockN ), iDivUp( M, blockM ) ); const dim3 threadBlock( blockN, blockM ); // Convolve: call cuda kernel convolution_kernel<<<grid,threadBlock, starpu_cuda_get_local_stream()>>>(fi, ni, mi, fk, r, fo, no, mo); cudaDeviceSynchronize(); #endif const unsigned int blockN = 32; const unsigned int blockM = 32; const dim3 grid( iDivUp( N, blockN ), iDivUp( M, blockM ) ); const dim3 threadBlock( blockN, blockM ); // No shared memory: third parameter is 0 convolution_kernel<<< grid, threadBlock, 0, starpu_cuda_get_local_stream()>>>(fi, ni, mi, fk, r, fo, no, mo); cudaStreamSynchronize(starpu_cuda_get_local_stream()); }
e9f0154c9f869dee0a5b13163e8140de99bb3a48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/group_norm_op.h" #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using DataLayout = framework::DataLayout; enum GroupNormKernelFlags { kHasScale = 1, kHasBias = 2 }; #define CHECK_CASE(i, flags, kernel_name, ...) \ if (i == flags) { \ hipLaunchKernelGGL(( kernel_name<T, i>), dim3(grid), dim3(threads), 0, dev_ctx.stream(), __VA_ARGS__); \ } // 0 for no scale, no bias // 1 for has scale, no bias // 2 for no scale, has bias // 3 for has scale, has bias #define UNROLL_ALL_CASES(flags, kernel_name, ...) 
\ CHECK_CASE(0, flags, kernel_name, __VA_ARGS__) \ CHECK_CASE(1, flags, kernel_name, __VA_ARGS__) \ CHECK_CASE(2, flags, kernel_name, __VA_ARGS__) \ CHECK_CASE(3, flags, kernel_name, __VA_ARGS__) template <typename T> __device__ __inline__ void CudaAtomicAddWithWarp(T* sum, T value) { typedef hipcub::WarpReduce<T> WarpReduce; typename WarpReduce::TempStorage temp_storage; value = WarpReduce(temp_storage).Sum(value); if (cub::LaneId() == 0) platform::CudaAtomicAdd(sum, value); } template <typename T> __global__ void GroupNormForwardGetMeanAndVar(const T* x, int N, int C, int W, int imsize, int groups, int group_size, T* mean, T* var, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int H = imsize / W; int number = min(group_size, static_cast<int>(C - gid * group_size)); int ccid = gid * group_size + cid; if (ccid >= C) return; T x_mean = 0, x_var = 0; for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { T val; if (data_layout == DataLayout::kNCHW) { val = x[(bid * C + ccid) * imsize + imid]; } else { int hid = imid / W; int wid = imid % W; val = x[(bid * H + hid) * W * C + wid * C + ccid]; } x_mean += val; x_var += val * val; } x_mean /= number * imsize; x_var /= number * imsize; CudaAtomicAddWithWarp(&mean[bid * groups + gid], x_mean); CudaAtomicAddWithWarp(&var[bid * groups + gid], x_var); } template <typename T, int flags> __global__ void GroupNormForward(const T* x, const T* mean, const T* var, const T* scale, const T* bias, int N, int C, int W, int imsize, int groups, int group_size, T epsilon, T* y, T* real_var, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int H = imsize / W; int ccid = gid * group_size + cid; if (ccid >= C) return; T x_mean = mean[bid * groups + gid]; T x_var = var[bid * groups + gid]; x_var = x_var - x_mean * x_mean; T var_inv = 1.0 / sqrt(x_var + epsilon); if (cid == 0 && threadIdx.x == 0) real_var[bid * groups + gid] = x_var; 
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { T val; int hid, wid; if (data_layout == DataLayout::kNCHW) { val = x[(bid * C + ccid) * imsize + imid]; } else { hid = imid / W; wid = imid % W; val = x[(bid * H + hid) * W * C + wid * C + ccid]; } val = (val - x_mean) * var_inv; if (flags & kHasScale) val *= scale[gid * group_size + cid]; if (flags & kHasBias) val += bias[gid * group_size + cid]; if (data_layout == DataLayout::kNCHW) { y[(bid * C + ccid) * imsize + imid] = val; } else { y[(bid * H + hid) * W * C + wid * C + ccid] = val; } } } template <typename T> class GroupNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const float epsilon = ctx.Attr<float>("epsilon"); auto* scale = ctx.Input<Tensor>("Scale"); auto* bias = ctx.Input<Tensor>("Bias"); auto* x = ctx.Input<Tensor>("X"); auto* y = ctx.Output<Tensor>("Y"); auto* mean = ctx.Output<Tensor>("Mean"); auto* var = ctx.Output<Tensor>("Variance"); const auto groups = ctx.Attr<int>("groups"); const auto x_dims = x->dims(); const int C = (data_layout == DataLayout::kNCHW ? x_dims[1] : x_dims[x_dims.size() - 1]); const int group_size = C / groups; const int W = (data_layout == DataLayout::kNCHW ? 
x_dims[x_dims.size() - 1] : x_dims[x_dims.size() - 2]); y->mutable_data<T>(ctx.GetPlace()); mean->mutable_data<T>(ctx.GetPlace()); var->mutable_data<T>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_zero; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); Tensor temp_var; temp_var.mutable_data<T>(var->dims(), ctx.GetPlace()); set_zero(dev_ctx, mean, static_cast<T>(0)); set_zero(dev_ctx, &temp_var, static_cast<T>(0)); auto* x_data = x->data<T>(); auto* y_data = y->data<T>(); auto* mean_data = mean->data<T>(); auto* var_data = var->data<T>(); auto* temp_var_data = temp_var.data<T>(); const T* scale_data = nullptr; if (scale) scale_data = scale->data<T>(); const T* bias_data = nullptr; if (bias) bias_data = bias->data<T>(); int imsize = 1; if (data_layout == DataLayout::kNCHW) { for (int i = 2; i < x_dims.size(); ++i) { imsize *= x_dims[i]; } } else { for (int i = 1; i < x_dims.size() - 1; ++i) { imsize *= x_dims[i]; } } #ifdef __HIPCC__ int block_size = ::max(::min(256, imsize), 64); #else int block_size = ::min(1024, imsize); #endif dim3 grid(group_size, groups, x_dims[0]); dim3 threads(block_size, 1, 1); hipLaunchKernelGGL(( GroupNormForwardGetMeanAndVar<T>), dim3(grid), dim3(threads), 0, dev_ctx.stream(), x_data, x_dims[0], C, W, imsize, groups, group_size, mean_data, temp_var_data, data_layout); int flags = (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias; UNROLL_ALL_CASES(flags, GroupNormForward, x_data, mean_data, temp_var_data, scale_data, bias_data, x_dims[0], C, W, imsize, groups, group_size, epsilon, y_data, var_data, data_layout); } }; template <typename T, int flags> __global__ void GroupNormBackwardGetMeanAndVar( const T* x, const T* scale, const T* bias, const T* d_y, int N, int C, int W, int imsize, int groups, int group_size, T epsilon, T* d_mean, T* d_var, T* d_scale, T* d_bias, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int 
H = imsize / W; int number = min(group_size, static_cast<int>(C - gid * group_size)); int ccid = gid * group_size + cid; if (ccid >= C) return; T x_scale = (flags & kHasScale) ? scale[ccid] : 1; T x_bias = (flags & kHasBias) ? bias[ccid] : 0; T x_scale_inv = 0; if (x_scale != 0) x_scale_inv = 1.0 / x_scale; T d_mean_data = 0, d_var_data = 0, d_scale_data = 0, d_bias_data = 0; for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { T val, dval; if (data_layout == DataLayout::kNCHW) { val = x[(bid * C + ccid) * imsize + imid] - x_bias; dval = d_y[(bid * C + ccid) * imsize + imid]; } else { int hid = imid / W; int wid = imid % W; val = x[(bid * H + hid) * W * C + wid * C + ccid] - x_bias; dval = d_y[(bid * H + hid) * W * C + wid * C + ccid]; } d_var_data += val * dval; d_mean_data += dval * x_scale; val = val * x_scale_inv; d_bias_data += dval; d_scale_data += val * dval; } CudaAtomicAddWithWarp(&(d_mean[bid * groups + gid]), d_mean_data); CudaAtomicAddWithWarp(&(d_var[bid * groups + gid]), d_var_data); if (flags & kHasScale) CudaAtomicAddWithWarp(&(d_scale[ccid]), d_scale_data); if (flags & kHasBias) CudaAtomicAddWithWarp(&(d_bias[ccid]), d_bias_data); } template <typename T, int flags> __global__ void GroupNormBackward(const T* x, const T* d_y, const T* scale, const T* bias, const T* var, const T* d_mean, const T* d_var, int N, int C, int W, int imsize, int groups, int group_size, T epsilon, T* d_x, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int H = imsize / W; int number = min(group_size, static_cast<int>(C - gid * group_size)); int ccid = gid * group_size + cid; if (ccid >= C) return; T x_var = var[bid * groups + gid]; T d_x_mean = d_mean[bid * groups + gid]; T d_x_var = d_var[bid * groups + gid]; T x_var_inv = 1.0 / sqrt(x_var + epsilon); T number_inv = 1.0 / (number * imsize); T x_scale = (flags & kHasScale) ? scale[ccid] : 1; T x_bias = (flags & kHasBias) ? 
bias[ccid] : 0; T x_scale_inv = 0; if (x_scale != 0) x_scale_inv = 1.0 / x_scale; for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { if (data_layout == DataLayout::kNCHW) { T tmp = x[(bid * C + ccid) * imsize + imid]; T v_y = (tmp - x_bias) * x_scale_inv; T dly = d_y[(bid * C + ccid) * imsize + imid]; d_x[(bid * C + ccid) * imsize + imid] = x_var_inv * (dly * x_scale - number_inv * d_x_var * v_y - number_inv * d_x_mean); } else { int hid = imid / W; int wid = imid % W; T tmp = x[(bid * H + hid) * W * C + wid * C + ccid]; T v_y = (tmp - x_bias) * x_scale_inv; T dly = d_y[(bid * H + hid) * W * C + wid * C + ccid]; d_x[(bid * H + hid) * W * C + wid * C + ccid] = x_var_inv * (dly * x_scale - number_inv * d_x_var * v_y - number_inv * d_x_mean); } } } template <typename T> class GroupNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const float epsilon = ctx.Attr<float>("epsilon"); auto* x = ctx.Input<Tensor>("Y"); auto* var = ctx.Input<Tensor>("Variance"); auto* scale = ctx.Input<Tensor>("Scale"); auto* bias = ctx.Input<Tensor>("Bias"); auto* d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto groups = ctx.Attr<int>("groups"); // init output auto* d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto* d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto* d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); const auto& x_dims = x->dims(); const int C = (data_layout == DataLayout::kNCHW ? x_dims[1] : x_dims[x_dims.size() - 1]); const int group_size = C / groups; const int W = (data_layout == DataLayout::kNCHW ? 
x_dims[x_dims.size() - 1] : x_dims[x_dims.size() - 2]); d_x->mutable_data<T>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_zero; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); Tensor temp_var; temp_var.mutable_data<T>(var->dims(), ctx.GetPlace()); set_zero(dev_ctx, &temp_var, static_cast<T>(0)); T* temp_var_data = temp_var.data<T>(); Tensor temp_mean; temp_mean.mutable_data<T>(var->dims(), ctx.GetPlace()); set_zero(dev_ctx, &temp_mean, static_cast<T>(0)); T* temp_mean_data = temp_mean.data<T>(); auto* x_data = x->data<T>(); T* d_x_data = nullptr; if (d_x) d_x_data = d_x->data<T>(); auto* y_data = d_y->data<T>(); auto* var_data = var->data<T>(); T* d_scale_data = nullptr; if (d_scale) { d_scale->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, d_scale, static_cast<T>(0)); d_scale_data = d_scale->data<T>(); } T* d_bias_data = nullptr; if (d_bias) { d_bias->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, d_bias, static_cast<T>(0)); d_bias_data = d_bias->data<T>(); } const T* scale_data = nullptr; if (scale) scale_data = scale->data<T>(); const T* bias_data = nullptr; if (bias) bias_data = bias->data<T>(); int imsize = 1; if (data_layout == DataLayout::kNCHW) { for (int i = 2; i < x_dims.size(); ++i) { imsize *= x_dims[i]; } } else { for (int i = 1; i < x_dims.size() - 1; ++i) { imsize *= x_dims[i]; } } #ifdef __HIPCC__ int block_size = ::max(::min(256, imsize), 64); #else int block_size = ::min(1024, imsize); #endif dim3 grid(group_size, groups, x_dims[0]); dim3 threads(block_size, 1, 1); int flags = (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias; UNROLL_ALL_CASES(flags, GroupNormBackwardGetMeanAndVar, x_data, scale_data, bias_data, y_data, x_dims[0], C, W, imsize, groups, group_size, epsilon, temp_mean_data, temp_var_data, d_scale_data, d_bias_data, data_layout); if (d_x_data != nullptr) { UNROLL_ALL_CASES(flags, GroupNormBackward, x_data, y_data, scale_data, bias_data, var_data, 
temp_mean_data, temp_var_data, x_dims[0], C, W, imsize, groups, group_size, epsilon, d_x_data, data_layout); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( group_norm, ops::GroupNormKernel<paddle::platform::CUDADeviceContext, float>, ops::GroupNormKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( group_norm_grad, ops::GroupNormGradKernel<paddle::platform::CUDADeviceContext, float>, ops::GroupNormGradKernel<paddle::platform::CUDADeviceContext, double>);
e9f0154c9f869dee0a5b13163e8140de99bb3a48.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/group_norm_op.h" #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using DataLayout = framework::DataLayout; enum GroupNormKernelFlags { kHasScale = 1, kHasBias = 2 }; #define CHECK_CASE(i, flags, kernel_name, ...) \ if (i == flags) { \ kernel_name<T, i><<<grid, threads, 0, dev_ctx.stream()>>>(__VA_ARGS__); \ } // 0 for no scale, no bias // 1 for has scale, no bias // 2 for no scale, has bias // 3 for has scale, has bias #define UNROLL_ALL_CASES(flags, kernel_name, ...) 
\ CHECK_CASE(0, flags, kernel_name, __VA_ARGS__) \ CHECK_CASE(1, flags, kernel_name, __VA_ARGS__) \ CHECK_CASE(2, flags, kernel_name, __VA_ARGS__) \ CHECK_CASE(3, flags, kernel_name, __VA_ARGS__) template <typename T> __device__ __inline__ void CudaAtomicAddWithWarp(T* sum, T value) { typedef cub::WarpReduce<T> WarpReduce; typename WarpReduce::TempStorage temp_storage; value = WarpReduce(temp_storage).Sum(value); if (cub::LaneId() == 0) platform::CudaAtomicAdd(sum, value); } template <typename T> __global__ void GroupNormForwardGetMeanAndVar(const T* x, int N, int C, int W, int imsize, int groups, int group_size, T* mean, T* var, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int H = imsize / W; int number = min(group_size, static_cast<int>(C - gid * group_size)); int ccid = gid * group_size + cid; if (ccid >= C) return; T x_mean = 0, x_var = 0; for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { T val; if (data_layout == DataLayout::kNCHW) { val = x[(bid * C + ccid) * imsize + imid]; } else { int hid = imid / W; int wid = imid % W; val = x[(bid * H + hid) * W * C + wid * C + ccid]; } x_mean += val; x_var += val * val; } x_mean /= number * imsize; x_var /= number * imsize; CudaAtomicAddWithWarp(&mean[bid * groups + gid], x_mean); CudaAtomicAddWithWarp(&var[bid * groups + gid], x_var); } template <typename T, int flags> __global__ void GroupNormForward(const T* x, const T* mean, const T* var, const T* scale, const T* bias, int N, int C, int W, int imsize, int groups, int group_size, T epsilon, T* y, T* real_var, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int H = imsize / W; int ccid = gid * group_size + cid; if (ccid >= C) return; T x_mean = mean[bid * groups + gid]; T x_var = var[bid * groups + gid]; x_var = x_var - x_mean * x_mean; T var_inv = 1.0 / sqrt(x_var + epsilon); if (cid == 0 && threadIdx.x == 0) real_var[bid * groups + gid] = x_var; for 
(int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { T val; int hid, wid; if (data_layout == DataLayout::kNCHW) { val = x[(bid * C + ccid) * imsize + imid]; } else { hid = imid / W; wid = imid % W; val = x[(bid * H + hid) * W * C + wid * C + ccid]; } val = (val - x_mean) * var_inv; if (flags & kHasScale) val *= scale[gid * group_size + cid]; if (flags & kHasBias) val += bias[gid * group_size + cid]; if (data_layout == DataLayout::kNCHW) { y[(bid * C + ccid) * imsize + imid] = val; } else { y[(bid * H + hid) * W * C + wid * C + ccid] = val; } } } template <typename T> class GroupNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const float epsilon = ctx.Attr<float>("epsilon"); auto* scale = ctx.Input<Tensor>("Scale"); auto* bias = ctx.Input<Tensor>("Bias"); auto* x = ctx.Input<Tensor>("X"); auto* y = ctx.Output<Tensor>("Y"); auto* mean = ctx.Output<Tensor>("Mean"); auto* var = ctx.Output<Tensor>("Variance"); const auto groups = ctx.Attr<int>("groups"); const auto x_dims = x->dims(); const int C = (data_layout == DataLayout::kNCHW ? x_dims[1] : x_dims[x_dims.size() - 1]); const int group_size = C / groups; const int W = (data_layout == DataLayout::kNCHW ? 
x_dims[x_dims.size() - 1] : x_dims[x_dims.size() - 2]); y->mutable_data<T>(ctx.GetPlace()); mean->mutable_data<T>(ctx.GetPlace()); var->mutable_data<T>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_zero; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); Tensor temp_var; temp_var.mutable_data<T>(var->dims(), ctx.GetPlace()); set_zero(dev_ctx, mean, static_cast<T>(0)); set_zero(dev_ctx, &temp_var, static_cast<T>(0)); auto* x_data = x->data<T>(); auto* y_data = y->data<T>(); auto* mean_data = mean->data<T>(); auto* var_data = var->data<T>(); auto* temp_var_data = temp_var.data<T>(); const T* scale_data = nullptr; if (scale) scale_data = scale->data<T>(); const T* bias_data = nullptr; if (bias) bias_data = bias->data<T>(); int imsize = 1; if (data_layout == DataLayout::kNCHW) { for (int i = 2; i < x_dims.size(); ++i) { imsize *= x_dims[i]; } } else { for (int i = 1; i < x_dims.size() - 1; ++i) { imsize *= x_dims[i]; } } #ifdef __HIPCC__ int block_size = std::max(std::min(256, imsize), 64); #else int block_size = std::min(1024, imsize); #endif dim3 grid(group_size, groups, x_dims[0]); dim3 threads(block_size, 1, 1); GroupNormForwardGetMeanAndVar<T><<<grid, threads, 0, dev_ctx.stream()>>>( x_data, x_dims[0], C, W, imsize, groups, group_size, mean_data, temp_var_data, data_layout); int flags = (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias; UNROLL_ALL_CASES(flags, GroupNormForward, x_data, mean_data, temp_var_data, scale_data, bias_data, x_dims[0], C, W, imsize, groups, group_size, epsilon, y_data, var_data, data_layout); } }; template <typename T, int flags> __global__ void GroupNormBackwardGetMeanAndVar( const T* x, const T* scale, const T* bias, const T* d_y, int N, int C, int W, int imsize, int groups, int group_size, T epsilon, T* d_mean, T* d_var, T* d_scale, T* d_bias, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int H = imsize / W; int 
number = min(group_size, static_cast<int>(C - gid * group_size)); int ccid = gid * group_size + cid; if (ccid >= C) return; T x_scale = (flags & kHasScale) ? scale[ccid] : 1; T x_bias = (flags & kHasBias) ? bias[ccid] : 0; T x_scale_inv = 0; if (x_scale != 0) x_scale_inv = 1.0 / x_scale; T d_mean_data = 0, d_var_data = 0, d_scale_data = 0, d_bias_data = 0; for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { T val, dval; if (data_layout == DataLayout::kNCHW) { val = x[(bid * C + ccid) * imsize + imid] - x_bias; dval = d_y[(bid * C + ccid) * imsize + imid]; } else { int hid = imid / W; int wid = imid % W; val = x[(bid * H + hid) * W * C + wid * C + ccid] - x_bias; dval = d_y[(bid * H + hid) * W * C + wid * C + ccid]; } d_var_data += val * dval; d_mean_data += dval * x_scale; val = val * x_scale_inv; d_bias_data += dval; d_scale_data += val * dval; } CudaAtomicAddWithWarp(&(d_mean[bid * groups + gid]), d_mean_data); CudaAtomicAddWithWarp(&(d_var[bid * groups + gid]), d_var_data); if (flags & kHasScale) CudaAtomicAddWithWarp(&(d_scale[ccid]), d_scale_data); if (flags & kHasBias) CudaAtomicAddWithWarp(&(d_bias[ccid]), d_bias_data); } template <typename T, int flags> __global__ void GroupNormBackward(const T* x, const T* d_y, const T* scale, const T* bias, const T* var, const T* d_mean, const T* d_var, int N, int C, int W, int imsize, int groups, int group_size, T epsilon, T* d_x, const DataLayout data_layout) { int gid = blockIdx.y; int cid = blockIdx.x; int bid = blockIdx.z; int H = imsize / W; int number = min(group_size, static_cast<int>(C - gid * group_size)); int ccid = gid * group_size + cid; if (ccid >= C) return; T x_var = var[bid * groups + gid]; T d_x_mean = d_mean[bid * groups + gid]; T d_x_var = d_var[bid * groups + gid]; T x_var_inv = 1.0 / sqrt(x_var + epsilon); T number_inv = 1.0 / (number * imsize); T x_scale = (flags & kHasScale) ? scale[ccid] : 1; T x_bias = (flags & kHasBias) ? 
bias[ccid] : 0; T x_scale_inv = 0; if (x_scale != 0) x_scale_inv = 1.0 / x_scale; for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { if (data_layout == DataLayout::kNCHW) { T tmp = x[(bid * C + ccid) * imsize + imid]; T v_y = (tmp - x_bias) * x_scale_inv; T dly = d_y[(bid * C + ccid) * imsize + imid]; d_x[(bid * C + ccid) * imsize + imid] = x_var_inv * (dly * x_scale - number_inv * d_x_var * v_y - number_inv * d_x_mean); } else { int hid = imid / W; int wid = imid % W; T tmp = x[(bid * H + hid) * W * C + wid * C + ccid]; T v_y = (tmp - x_bias) * x_scale_inv; T dly = d_y[(bid * H + hid) * W * C + wid * C + ccid]; d_x[(bid * H + hid) * W * C + wid * C + ccid] = x_var_inv * (dly * x_scale - number_inv * d_x_var * v_y - number_inv * d_x_mean); } } } template <typename T> class GroupNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const float epsilon = ctx.Attr<float>("epsilon"); auto* x = ctx.Input<Tensor>("Y"); auto* var = ctx.Input<Tensor>("Variance"); auto* scale = ctx.Input<Tensor>("Scale"); auto* bias = ctx.Input<Tensor>("Bias"); auto* d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto groups = ctx.Attr<int>("groups"); // init output auto* d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto* d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto* d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); const auto& x_dims = x->dims(); const int C = (data_layout == DataLayout::kNCHW ? x_dims[1] : x_dims[x_dims.size() - 1]); const int group_size = C / groups; const int W = (data_layout == DataLayout::kNCHW ? 
x_dims[x_dims.size() - 1] : x_dims[x_dims.size() - 2]); d_x->mutable_data<T>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_zero; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); Tensor temp_var; temp_var.mutable_data<T>(var->dims(), ctx.GetPlace()); set_zero(dev_ctx, &temp_var, static_cast<T>(0)); T* temp_var_data = temp_var.data<T>(); Tensor temp_mean; temp_mean.mutable_data<T>(var->dims(), ctx.GetPlace()); set_zero(dev_ctx, &temp_mean, static_cast<T>(0)); T* temp_mean_data = temp_mean.data<T>(); auto* x_data = x->data<T>(); T* d_x_data = nullptr; if (d_x) d_x_data = d_x->data<T>(); auto* y_data = d_y->data<T>(); auto* var_data = var->data<T>(); T* d_scale_data = nullptr; if (d_scale) { d_scale->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, d_scale, static_cast<T>(0)); d_scale_data = d_scale->data<T>(); } T* d_bias_data = nullptr; if (d_bias) { d_bias->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, d_bias, static_cast<T>(0)); d_bias_data = d_bias->data<T>(); } const T* scale_data = nullptr; if (scale) scale_data = scale->data<T>(); const T* bias_data = nullptr; if (bias) bias_data = bias->data<T>(); int imsize = 1; if (data_layout == DataLayout::kNCHW) { for (int i = 2; i < x_dims.size(); ++i) { imsize *= x_dims[i]; } } else { for (int i = 1; i < x_dims.size() - 1; ++i) { imsize *= x_dims[i]; } } #ifdef __HIPCC__ int block_size = std::max(std::min(256, imsize), 64); #else int block_size = std::min(1024, imsize); #endif dim3 grid(group_size, groups, x_dims[0]); dim3 threads(block_size, 1, 1); int flags = (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias; UNROLL_ALL_CASES(flags, GroupNormBackwardGetMeanAndVar, x_data, scale_data, bias_data, y_data, x_dims[0], C, W, imsize, groups, group_size, epsilon, temp_mean_data, temp_var_data, d_scale_data, d_bias_data, data_layout); if (d_x_data != nullptr) { UNROLL_ALL_CASES(flags, GroupNormBackward, x_data, y_data, scale_data, bias_data, 
var_data, temp_mean_data, temp_var_data, x_dims[0], C, W, imsize, groups, group_size, epsilon, d_x_data, data_layout); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( group_norm, ops::GroupNormKernel<paddle::platform::CUDADeviceContext, float>, ops::GroupNormKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( group_norm_grad, ops::GroupNormGradKernel<paddle::platform::CUDADeviceContext, float>, ops::GroupNormGradKernel<paddle::platform::CUDADeviceContext, double>);
2c1b6c3862fe22e4d509636e890e46770301557f.hip
// !!! This is a file automatically generated by hipify!!! #include "gtest/gtest.h" #include <gsl/gsl> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include <thrust/device_new.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <algorithm> #include "macros.h" /** @file test/test_build_cuda.cpp * Test if cuda is found on the system and is useable on the machine. */ TEST(CUDA, init) { int NbrDevices; hipGetDeviceCount(&NbrDevices); ASSERT_GT(NbrDevices, 0) << "No Cuda devices were found"; } // copied from TEST(CUDA, thrust_call) { constexpr std::size_t VectorSize = 10000000u; // generate many random numbers thrust::host_vector<int> HVec(VectorSize); ASSERT_EQ(HVec.size(), VectorSize) << "Host vector not created with correct size"; std::generate(HVec.begin(), HVec.end(), rand); // transfer data to the device thrust::device_vector<int> DVec = HVec; ASSERT_EQ(DVec.size(), VectorSize) << "Device Vector not created with correct size"; // sort data on the device thrust::sort(DVec.begin(), DVec.end()); ASSERT_TRUE(thrust::is_sorted(DVec.begin(), DVec.end())) << "Sorted on GPU"; // transfer data back to host thrust::copy(DVec.begin(), DVec.end(), HVec.begin()); ASSERT_TRUE(std::is_sorted(HVec.begin(), HVec.end())) << "Vector is not sorted"; } struct ASimpleClass { __host__ __device__ ASimpleClass(int value) : value{value} {} int value; }; struct SimpleClass { __host__ __device__ SimpleClass(ASimpleClass* common_constant) : value{0}, common_constant{common_constant} {} __host__ __device__ SimpleClass(int v, ASimpleClass* common_constant) : value{v}, common_constant{common_constant} {} __host__ __device__ SimpleClass(const SimpleClass& o) : value{o.value}, common_constant{o.common_constant} {} int value; ASimpleClass* common_constant; }; struct Square { CUCALL SimpleClass 
operator()(const SimpleClass& o) { return SimpleClass{o.value * o.value + o.common_constant->value, o.common_constant}; } }; TEST(CUDA, thrust_with_object) { const auto cvptr= thrust::device_malloc<ASimpleClass>(1); const auto csptr= thrust::device_new(cvptr, ASimpleClass{42}); const auto vptr = thrust::device_malloc<SimpleClass>(100); const auto sptr = thrust::device_new(vptr, SimpleClass{csptr.get()}, 100); auto _ = gsl::finally([&cvptr,&vptr]() { thrust::device_free(cvptr); thrust::device_free(vptr); }); thrust::fill(sptr, sptr + 100, SimpleClass{15, csptr.get()}); thrust::transform(sptr, sptr + 100, sptr, Square{}); } TEST(CUDA, thrust_with_vector) { const auto cvptr= thrust::device_malloc<ASimpleClass>(1); const auto csptr= thrust::device_new(cvptr, ASimpleClass{42}); auto _ = gsl::finally([&cvptr]() { thrust::device_free(cvptr); }); thrust::device_vector<SimpleClass> vec(100, SimpleClass{15, csptr.get()}); thrust::fill(vec.begin(), vec.end(), SimpleClass{30, csptr.get()}); thrust::transform(vec.begin(), vec.end(), vec.begin(), Square{}); } int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
2c1b6c3862fe22e4d509636e890e46770301557f.cu
#include "gtest/gtest.h" #include <gsl/gsl> #include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include <thrust/device_new.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <algorithm> #include "macros.h" /** @file test/test_build_cuda.cpp * Test if cuda is found on the system and is useable on the machine. */ TEST(CUDA, init) { int NbrDevices; cudaGetDeviceCount(&NbrDevices); ASSERT_GT(NbrDevices, 0) << "No Cuda devices were found"; } // copied from TEST(CUDA, thrust_call) { constexpr std::size_t VectorSize = 10000000u; // generate many random numbers thrust::host_vector<int> HVec(VectorSize); ASSERT_EQ(HVec.size(), VectorSize) << "Host vector not created with correct size"; std::generate(HVec.begin(), HVec.end(), rand); // transfer data to the device thrust::device_vector<int> DVec = HVec; ASSERT_EQ(DVec.size(), VectorSize) << "Device Vector not created with correct size"; // sort data on the device thrust::sort(DVec.begin(), DVec.end()); ASSERT_TRUE(thrust::is_sorted(DVec.begin(), DVec.end())) << "Sorted on GPU"; // transfer data back to host thrust::copy(DVec.begin(), DVec.end(), HVec.begin()); ASSERT_TRUE(std::is_sorted(HVec.begin(), HVec.end())) << "Vector is not sorted"; } struct ASimpleClass { __host__ __device__ ASimpleClass(int value) : value{value} {} int value; }; struct SimpleClass { __host__ __device__ SimpleClass(ASimpleClass* common_constant) : value{0}, common_constant{common_constant} {} __host__ __device__ SimpleClass(int v, ASimpleClass* common_constant) : value{v}, common_constant{common_constant} {} __host__ __device__ SimpleClass(const SimpleClass& o) : value{o.value}, common_constant{o.common_constant} {} int value; ASimpleClass* common_constant; }; struct Square { CUCALL SimpleClass operator()(const SimpleClass& o) { return SimpleClass{o.value * o.value 
+ o.common_constant->value, o.common_constant}; } }; TEST(CUDA, thrust_with_object) { const auto cvptr= thrust::device_malloc<ASimpleClass>(1); const auto csptr= thrust::device_new(cvptr, ASimpleClass{42}); const auto vptr = thrust::device_malloc<SimpleClass>(100); const auto sptr = thrust::device_new(vptr, SimpleClass{csptr.get()}, 100); auto _ = gsl::finally([&cvptr,&vptr]() { thrust::device_free(cvptr); thrust::device_free(vptr); }); thrust::fill(sptr, sptr + 100, SimpleClass{15, csptr.get()}); thrust::transform(sptr, sptr + 100, sptr, Square{}); } TEST(CUDA, thrust_with_vector) { const auto cvptr= thrust::device_malloc<ASimpleClass>(1); const auto csptr= thrust::device_new(cvptr, ASimpleClass{42}); auto _ = gsl::finally([&cvptr]() { thrust::device_free(cvptr); }); thrust::device_vector<SimpleClass> vec(100, SimpleClass{15, csptr.get()}); thrust::fill(vec.begin(), vec.end(), SimpleClass{30, csptr.get()}); thrust::transform(vec.begin(), vec.end(), vec.begin(), Square{}); } int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
36ceefcac66b07c3756b0cf7fa62516876a5b068.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/common/data_type.h" #include "oneflow/core/ep/include/device.h" #include "oneflow/core/ep/cuda/cuda_stream.h" #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/distributions/common.h" #include "oneflow/user/kernels/random_seed_util.h" // NOTE(Liang Depeng): The implementation of MultinomialWithReplacementGpuKernel is modified from // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/MultinomialKernel.cu#L324 namespace oneflow { namespace { template<typename T> __device__ int binarySearchForMultinomial(const T* cumdist, const T* dist, int32_t size, T val) { int start = 0; int end = size; while (end - start > 0) { int mid = start + (end - start) / 2; T midVal = cumdist[mid]; if (midVal < val) { start = mid + 1; } else { end = mid; } } if (start == size) { // No probability mass or precision problems; just return the // first non-zero element by setting start to size-1 here, // the code below will move it to the last non-zero probability // this actually can happen when the random number is 1 // (github pytorch issue #4858). 
start = size - 1; } while (start >= 1 && dist[start] == 0) start--; return start; } template<typename T> __global__ void sampleMultinomialWithReplacement(uint64_t seed, uint64_t offset, int32_t totalSamples, int64_t* dest, int64_t distributions, int64_t categories, const T* normDistPrefixSum, const T* normDist) { // At the moment, each warp computes one sample value in the binary // search due to divergence. It seems possible to compute multiple // values and limit divergence though later on. // global index formula for 2D grid of 1D blocks int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seed, idx, offset, &state); // The block determines the distribution for which we generate a point for (int64_t curDist = blockIdx.y; curDist < distributions; curDist += gridDim.y) { for (int sample = blockIdx.x * blockDim.x + threadIdx.x; sample < totalSamples; sample += blockDim.x * gridDim.x) { // we are losing 3 out of 4 generated numbers but it's ok // this kernel is not very efficient anyway auto rand = hiprand_uniform4(&state); T r = static_cast<T>(rand.x); // Find the bucket that a uniform sample lies in int choice = binarySearchForMultinomial<T>(normDistPrefixSum + curDist * categories, normDist + curDist * categories, categories, r); dest[curDist * totalSamples + sample] = choice; } } } } // namespace template<typename T> class MultinomialWithReplacementGpuKernel final : public user_op::OpKernel { public: MultinomialWithReplacementGpuKernel() = default; ~MultinomialWithReplacementGpuKernel() = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { const auto& generator = CHECK_JUST(one::MakeGenerator(DeviceType::kCUDA)); // When SBP is Split, each rank uses a different seeds, otherwise, ranks use the same seed generator->set_current_seed( CHECK_JUST(GetOpKernelRandomSeedInCurrentRank(ctx, ctx->Attr<int64_t>("seed")))); 
return std::make_shared<DistributionKernelState>(generator); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state, const user_op::OpKernelCache*) const override { auto* distribution_state = dynamic_cast<DistributionKernelState*>(state); CHECK_NOTNULL(distribution_state); const auto& generator = distribution_state->generator(); CHECK_NOTNULL(generator); auto gpu_gen = CHECK_JUST(generator->Get<ep::CUDAGenerator>()); const user_op::Tensor* norm_dist = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* prefix_sum = ctx->Tensor4ArgNameAndIndex("prefix_sum", 0); CHECK_NOTNULL(prefix_sum); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const T* norm_dist_ptr = norm_dist->dptr<T>(); const T* prefix_sum_ptr = prefix_sum->dptr<T>(); int64_t* result_ptr = out->mut_dptr<int64_t>(); int64_t numCategories = norm_dist->shape_view().At(norm_dist->shape_view().NumAxes() - 1); int64_t numDist = norm_dist->shape_view().NumAxes() > 1 ? norm_dist->shape_view().At(0) : 1; const int32_t n_sample = ctx->Attr<int32_t>("num_samples"); // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(128); ep::CudaStream* stream = ctx->stream()->As<ep::CudaStream>(); // Each block will generate a sample from one // distribution concurrently. 
int grid_y = std::min<int>(numDist, stream->device_properties().maxGridSize[1]); dim3 grid((n_sample - 1) / block.x + 1, grid_y); uint64_t seed = gpu_gen->current_seed(); uint64_t offset = gpu_gen->get_philox_offset(((numDist - 1) / grid.y + 1) * 4); // Sample with replacement hipLaunchKernelGGL(( sampleMultinomialWithReplacement), dim3(grid), dim3(block), 0, stream->cuda_stream(), seed, offset, n_sample, result_ptr, numDist, numCategories, prefix_sum_ptr, norm_dist_ptr); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MULTINOMIAL_WITH_REPLACEMENT_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("multinomial_with_replacement") \ .SetCreateFn<MultinomialWithReplacementGpuKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("x", 0) == GetDataType<dtype>::value) \ && (user_op::HobDataType("prefix_sum", 0) == GetDataType<dtype>::value)); REGISTER_MULTINOMIAL_WITH_REPLACEMENT_GPU_KERNEL(float) REGISTER_MULTINOMIAL_WITH_REPLACEMENT_GPU_KERNEL(double) } // namespace oneflow
36ceefcac66b07c3756b0cf7fa62516876a5b068.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/common/data_type.h" #include "oneflow/core/ep/include/device.h" #include "oneflow/core/ep/cuda/cuda_stream.h" #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/distributions/common.h" #include "oneflow/user/kernels/random_seed_util.h" // NOTE(Liang Depeng): The implementation of MultinomialWithReplacementGpuKernel is modified from // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/MultinomialKernel.cu#L324 namespace oneflow { namespace { template<typename T> __device__ int binarySearchForMultinomial(const T* cumdist, const T* dist, int32_t size, T val) { int start = 0; int end = size; while (end - start > 0) { int mid = start + (end - start) / 2; T midVal = cumdist[mid]; if (midVal < val) { start = mid + 1; } else { end = mid; } } if (start == size) { // No probability mass or precision problems; just return the // first non-zero element by setting start to size-1 here, // the code below will move it to the last non-zero probability // this actually can happen when the random number is 1 // (github pytorch issue #4858). 
start = size - 1; } while (start >= 1 && dist[start] == 0) start--; return start; } template<typename T> __global__ void sampleMultinomialWithReplacement(uint64_t seed, uint64_t offset, int32_t totalSamples, int64_t* dest, int64_t distributions, int64_t categories, const T* normDistPrefixSum, const T* normDist) { // At the moment, each warp computes one sample value in the binary // search due to divergence. It seems possible to compute multiple // values and limit divergence though later on. // global index formula for 2D grid of 1D blocks int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seed, idx, offset, &state); // The block determines the distribution for which we generate a point for (int64_t curDist = blockIdx.y; curDist < distributions; curDist += gridDim.y) { for (int sample = blockIdx.x * blockDim.x + threadIdx.x; sample < totalSamples; sample += blockDim.x * gridDim.x) { // we are losing 3 out of 4 generated numbers but it's ok // this kernel is not very efficient anyway auto rand = curand_uniform4(&state); T r = static_cast<T>(rand.x); // Find the bucket that a uniform sample lies in int choice = binarySearchForMultinomial<T>(normDistPrefixSum + curDist * categories, normDist + curDist * categories, categories, r); dest[curDist * totalSamples + sample] = choice; } } } } // namespace template<typename T> class MultinomialWithReplacementGpuKernel final : public user_op::OpKernel { public: MultinomialWithReplacementGpuKernel() = default; ~MultinomialWithReplacementGpuKernel() = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { const auto& generator = CHECK_JUST(one::MakeGenerator(DeviceType::kCUDA)); // When SBP is Split, each rank uses a different seeds, otherwise, ranks use the same seed generator->set_current_seed( CHECK_JUST(GetOpKernelRandomSeedInCurrentRank(ctx, ctx->Attr<int64_t>("seed")))); return 
std::make_shared<DistributionKernelState>(generator); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state, const user_op::OpKernelCache*) const override { auto* distribution_state = dynamic_cast<DistributionKernelState*>(state); CHECK_NOTNULL(distribution_state); const auto& generator = distribution_state->generator(); CHECK_NOTNULL(generator); auto gpu_gen = CHECK_JUST(generator->Get<ep::CUDAGenerator>()); const user_op::Tensor* norm_dist = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* prefix_sum = ctx->Tensor4ArgNameAndIndex("prefix_sum", 0); CHECK_NOTNULL(prefix_sum); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const T* norm_dist_ptr = norm_dist->dptr<T>(); const T* prefix_sum_ptr = prefix_sum->dptr<T>(); int64_t* result_ptr = out->mut_dptr<int64_t>(); int64_t numCategories = norm_dist->shape_view().At(norm_dist->shape_view().NumAxes() - 1); int64_t numDist = norm_dist->shape_view().NumAxes() > 1 ? norm_dist->shape_view().At(0) : 1; const int32_t n_sample = ctx->Attr<int32_t>("num_samples"); // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(128); ep::CudaStream* stream = ctx->stream()->As<ep::CudaStream>(); // Each block will generate a sample from one // distribution concurrently. 
int grid_y = std::min<int>(numDist, stream->device_properties().maxGridSize[1]); dim3 grid((n_sample - 1) / block.x + 1, grid_y); uint64_t seed = gpu_gen->current_seed(); uint64_t offset = gpu_gen->get_philox_offset(((numDist - 1) / grid.y + 1) * 4); // Sample with replacement sampleMultinomialWithReplacement<<<grid, block, 0, stream->cuda_stream()>>>( seed, offset, n_sample, result_ptr, numDist, numCategories, prefix_sum_ptr, norm_dist_ptr); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MULTINOMIAL_WITH_REPLACEMENT_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("multinomial_with_replacement") \ .SetCreateFn<MultinomialWithReplacementGpuKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("x", 0) == GetDataType<dtype>::value) \ && (user_op::HobDataType("prefix_sum", 0) == GetDataType<dtype>::value)); REGISTER_MULTINOMIAL_WITH_REPLACEMENT_GPU_KERNEL(float) REGISTER_MULTINOMIAL_WITH_REPLACEMENT_GPU_KERNEL(double) } // namespace oneflow
44aae8ed2d5d547385c5a24dca9ccee80c9b81b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cmath> #include <vector> #include "caffe/layer.hpp" #include "caffe/layers/lstm_relu_layer.hpp" namespace caffe { template <typename Dtype> __device__ Dtype sigmoid(const Dtype x) { return Dtype(1) / (Dtype(1) + exp(-x)); } // template <typename Dtype> // __device__ Dtype tanh(const Dtype x) { // return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1); // } template <typename Dtype> __global__ void LSTMReLUActsForward(const int nthreads, const int dim, const Dtype* X, Dtype* X_acts) { CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; if (d < 3 * dim) { X_acts[index] = sigmoid(X[index]); } else { X_acts[index] = X[index] > 0 ? X[index] : 0; // tanh(X[index]); } } } template <typename Dtype> __global__ void LSTMReLUUnitForward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* cont, Dtype* C, Dtype* H) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; const Dtype c = cont[n] * f * c_prev + i * g; C[index] = c; const Dtype relu_c = c > 0 ? 
c : 0; H[index] = o * relu_c; } } template <typename Dtype> void LSTMReLUUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X = bottom[1]->gpu_data(); const Dtype* cont = bottom[2]->gpu_data(); Dtype* X_acts = X_acts_.mutable_gpu_data(); Dtype* C = top[0]->mutable_gpu_data(); Dtype* H = top[1]->mutable_gpu_data(); const int X_count = bottom[1]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LSTMReLUActsForward<Dtype>), dim3(CAFFE_GET_BLOCKS(X_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, X_count, hidden_dim_, X, X_acts); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LSTMReLUUnitForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, hidden_dim_, C_prev, X_acts, cont, C, H); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void LSTMReLUUnitBackward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* C, const Dtype* H, const Dtype* cont, const Dtype* C_diff, const Dtype* H_diff, Dtype* C_prev_diff, Dtype* X_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; const Dtype c = C[index]; const Dtype relu_c = c > 0 ? 
c : 0; Dtype* c_prev_diff = C_prev_diff + index; Dtype* X_diff_offset = X_diff + 4 * dim * n; Dtype* i_diff = X_diff_offset + d; Dtype* f_diff = X_diff_offset + 1 * dim + d; Dtype* o_diff = X_diff_offset + 2 * dim + d; Dtype* g_diff = X_diff_offset + 3 * dim + d; const Dtype c_term_diff = C_diff[index] + H_diff[index] * o * (c > 0); const Dtype cont_n = cont[n]; *c_prev_diff = cont_n * c_term_diff * f; *i_diff = c_term_diff * g; *f_diff = cont_n * c_term_diff * c_prev; *o_diff = H_diff[index] * relu_c; *g_diff = c_term_diff * i; } } template <typename Dtype> __global__ void LSTMReLUActsBackward(const int nthreads, const int dim, const Dtype* X_acts, const Dtype* X_acts_diff, Dtype* X_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; const Dtype X_act = X_acts[index]; if (d < 3 * dim) { X_diff[index] = X_acts_diff[index] * X_act * (Dtype(1) - X_act); } else { X_diff[index] = X_acts_diff[index] * (X_act > 0); } } } template <typename Dtype> void LSTMReLUUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { CHECK(!propagate_down[2]) << "Cannot backpropagate to sequence indicators."; if (!propagate_down[0] && !propagate_down[1]) { return; } const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X_acts = X_acts_.gpu_data(); const Dtype* cont = bottom[2]->gpu_data(); const Dtype* C = top[0]->gpu_data(); const Dtype* H = top[1]->gpu_data(); const Dtype* C_diff = top[0]->gpu_diff(); const Dtype* H_diff = top[1]->gpu_diff(); Dtype* C_prev_diff = bottom[0]->mutable_gpu_diff(); Dtype* X_acts_diff = X_acts_.mutable_gpu_diff(); LSTMReLUUnitBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, hidden_dim_, C_prev, X_acts, C, H, cont, C_diff, H_diff, C_prev_diff, X_acts_diff); CUDA_POST_KERNEL_CHECK; const int 
X_count = bottom[1]->count(); Dtype* X_diff = bottom[1]->mutable_gpu_diff(); LSTMReLUActsBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(X_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, X_count, hidden_dim_, X_acts, X_acts_diff, X_diff); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(LSTMReLUUnitLayer); } // namespace caffe
44aae8ed2d5d547385c5a24dca9ccee80c9b81b6.cu
#include <algorithm> #include <cmath> #include <vector> #include "caffe/layer.hpp" #include "caffe/layers/lstm_relu_layer.hpp" namespace caffe { template <typename Dtype> __device__ Dtype sigmoid(const Dtype x) { return Dtype(1) / (Dtype(1) + exp(-x)); } // template <typename Dtype> // __device__ Dtype tanh(const Dtype x) { // return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1); // } template <typename Dtype> __global__ void LSTMReLUActsForward(const int nthreads, const int dim, const Dtype* X, Dtype* X_acts) { CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; if (d < 3 * dim) { X_acts[index] = sigmoid(X[index]); } else { X_acts[index] = X[index] > 0 ? X[index] : 0; // tanh(X[index]); } } } template <typename Dtype> __global__ void LSTMReLUUnitForward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* cont, Dtype* C, Dtype* H) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; const Dtype c = cont[n] * f * c_prev + i * g; C[index] = c; const Dtype relu_c = c > 0 ? 
c : 0; H[index] = o * relu_c; } } template <typename Dtype> void LSTMReLUUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X = bottom[1]->gpu_data(); const Dtype* cont = bottom[2]->gpu_data(); Dtype* X_acts = X_acts_.mutable_gpu_data(); Dtype* C = top[0]->mutable_gpu_data(); Dtype* H = top[1]->mutable_gpu_data(); const int X_count = bottom[1]->count(); // NOLINT_NEXT_LINE(whitespace/operators) LSTMReLUActsForward<Dtype><<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>( X_count, hidden_dim_, X, X_acts); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) LSTMReLUUnitForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, hidden_dim_, C_prev, X_acts, cont, C, H); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void LSTMReLUUnitBackward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* C, const Dtype* H, const Dtype* cont, const Dtype* C_diff, const Dtype* H_diff, Dtype* C_prev_diff, Dtype* X_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; const Dtype c = C[index]; const Dtype relu_c = c > 0 ? 
c : 0; Dtype* c_prev_diff = C_prev_diff + index; Dtype* X_diff_offset = X_diff + 4 * dim * n; Dtype* i_diff = X_diff_offset + d; Dtype* f_diff = X_diff_offset + 1 * dim + d; Dtype* o_diff = X_diff_offset + 2 * dim + d; Dtype* g_diff = X_diff_offset + 3 * dim + d; const Dtype c_term_diff = C_diff[index] + H_diff[index] * o * (c > 0); const Dtype cont_n = cont[n]; *c_prev_diff = cont_n * c_term_diff * f; *i_diff = c_term_diff * g; *f_diff = cont_n * c_term_diff * c_prev; *o_diff = H_diff[index] * relu_c; *g_diff = c_term_diff * i; } } template <typename Dtype> __global__ void LSTMReLUActsBackward(const int nthreads, const int dim, const Dtype* X_acts, const Dtype* X_acts_diff, Dtype* X_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; const Dtype X_act = X_acts[index]; if (d < 3 * dim) { X_diff[index] = X_acts_diff[index] * X_act * (Dtype(1) - X_act); } else { X_diff[index] = X_acts_diff[index] * (X_act > 0); } } } template <typename Dtype> void LSTMReLUUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { CHECK(!propagate_down[2]) << "Cannot backpropagate to sequence indicators."; if (!propagate_down[0] && !propagate_down[1]) { return; } const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X_acts = X_acts_.gpu_data(); const Dtype* cont = bottom[2]->gpu_data(); const Dtype* C = top[0]->gpu_data(); const Dtype* H = top[1]->gpu_data(); const Dtype* C_diff = top[0]->gpu_diff(); const Dtype* H_diff = top[1]->gpu_diff(); Dtype* C_prev_diff = bottom[0]->mutable_gpu_diff(); Dtype* X_acts_diff = X_acts_.mutable_gpu_diff(); LSTMReLUUnitBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, hidden_dim_, C_prev, X_acts, C, H, cont, C_diff, H_diff, C_prev_diff, X_acts_diff); CUDA_POST_KERNEL_CHECK; const int X_count = bottom[1]->count(); Dtype* 
X_diff = bottom[1]->mutable_gpu_diff(); LSTMReLUActsBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>( X_count, hidden_dim_, X_acts, X_acts_diff, X_diff); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(LSTMReLUUnitLayer); } // namespace caffe
d60df61a0e7059918fc4dbe11ff691c674cb84ba.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include "hip/hip_runtime.h" #include "cucheck.h" #include "dwt_gpu.h" int getBestThreadBlockSize(int imageSize) { if (imageSize >= 4096) { return 1024; } else if (imageSize < 512) { return 128; } else { // round imageSize/4 to the nearest power of 2 return 1 << (int)(log2((double)imageSize) - 2 + .5); } } int haar(float *output, float *input, int width, int steps, bool inverse, int blockSize) { float *plmemory; float elapsed; hipDeviceProp_t prop; CUCHECK(hipGetDeviceProperties(&prop, 0)); printf("GPU %d: %s\n", 0, prop.name); // Make a copy of the data for the GPU to use. // Allocate page-locked virtual memory (that won't be moved from its // position in physical memory) so the data can be copied to the GPU // via DMA This approximately double the throughput. Just be sure // to free the data with hipHostFree() rather than delete[]. CUCHECK(hipHostMalloc((void**)&plmemory, width*width*sizeof(float))); memcpy(plmemory, input, sizeof(float)*width*width); // run the GPU version of the algorithm if (blockSize == -1) blockSize = getBestThreadBlockSize(width); elapsed = haar_2d_cuda(width, plmemory, inverse, steps, blockSize, true); memcpy(output, plmemory, sizeof(float)*width*width); printf("CUDA: %.6f ms\n", elapsed); CUCHECK(hipHostFree(plmemory)); return 0; } // double support was added in version 1.3 #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 130) int haar(double *output, double *input, int width, int steps, bool inverse, int blockSize) { double *plmemory; float elapsed; hipDeviceProp_t prop; CUCHECK(hipGetDeviceProperties(&prop, 0)); printf("GPU %d: %s\n", 0, prop.name); // Make a copy of the data for the GPU to use. // Allocate page-locked virtual memory (that won't be moved from its // position in physical memory) so the data can be copied to the GPU // via DMA This approximately double the throughput. Just be sure // to free the data with hipHostFree() rather than delete[]. 
CUCHECK(hipHostMalloc((void**)&plmemory, width*width*sizeof(double))); memcpy(plmemory, input, sizeof(double)*width*width); // run the GPU version of the algorithm if (blockSize == -1) blockSize = getBestThreadBlockSize(width); elapsed = haar_2d_cuda(width, plmemory, inverse, steps, blockSize, true); memcpy(output, plmemory, sizeof(double)*width*width); printf("CUDA: %.6f ms\n", elapsed); CUCHECK(hipHostFree(plmemory)); return 0; } #endif // cuda 1.3
d60df61a0e7059918fc4dbe11ff691c674cb84ba.cu
#include <cmath> #include "cuda.h" #include "cucheck.h" #include "dwt_gpu.h" int getBestThreadBlockSize(int imageSize) { if (imageSize >= 4096) { return 1024; } else if (imageSize < 512) { return 128; } else { // round imageSize/4 to the nearest power of 2 return 1 << (int)(log2((double)imageSize) - 2 + .5); } } int haar(float *output, float *input, int width, int steps, bool inverse, int blockSize) { float *plmemory; float elapsed; cudaDeviceProp prop; CUCHECK(cudaGetDeviceProperties(&prop, 0)); printf("GPU %d: %s\n", 0, prop.name); // Make a copy of the data for the GPU to use. // Allocate page-locked virtual memory (that won't be moved from its // position in physical memory) so the data can be copied to the GPU // via DMA This approximately double the throughput. Just be sure // to free the data with cudaFreeHost() rather than delete[]. CUCHECK(cudaMallocHost((void**)&plmemory, width*width*sizeof(float))); memcpy(plmemory, input, sizeof(float)*width*width); // run the GPU version of the algorithm if (blockSize == -1) blockSize = getBestThreadBlockSize(width); elapsed = haar_2d_cuda(width, plmemory, inverse, steps, blockSize, true); memcpy(output, plmemory, sizeof(float)*width*width); printf("CUDA: %.6f ms\n", elapsed); CUCHECK(cudaFreeHost(plmemory)); return 0; } // double support was added in version 1.3 #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 130) int haar(double *output, double *input, int width, int steps, bool inverse, int blockSize) { double *plmemory; float elapsed; cudaDeviceProp prop; CUCHECK(cudaGetDeviceProperties(&prop, 0)); printf("GPU %d: %s\n", 0, prop.name); // Make a copy of the data for the GPU to use. // Allocate page-locked virtual memory (that won't be moved from its // position in physical memory) so the data can be copied to the GPU // via DMA This approximately double the throughput. Just be sure // to free the data with cudaFreeHost() rather than delete[]. 
CUCHECK(cudaMallocHost((void**)&plmemory, width*width*sizeof(double))); memcpy(plmemory, input, sizeof(double)*width*width); // run the GPU version of the algorithm if (blockSize == -1) blockSize = getBestThreadBlockSize(width); elapsed = haar_2d_cuda(width, plmemory, inverse, steps, blockSize, true); memcpy(output, plmemory, sizeof(double)*width*width); printf("CUDA: %.6f ms\n", elapsed); CUCHECK(cudaFreeHost(plmemory)); return 0; } #endif // cuda 1.3
6a1d34f8c7a0c3b1964166abce9b1153985f612b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "AES.h" #include "main.h" using namespace std; int main(int argc, char **argv) { if(argc < 2) { printf("USAGE: benchmark FILE\n"); return 1; } uchar key[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; uint keySize = 32; //uchar pt_debug[] = { 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c }; uint *ct, *pt; FILE *f = fopen(argv[1], "rb"); if(f == NULL) { printf("File not found.\n"); return 1; } fseek(f, 0, SEEK_END); uint f_size = ftell(f); rewind(f); if(f_size % 4*sizeof(uint) != 0) { printf("Plaintext size must be a multiple of AES block size.\n"); return 1; } uint ptSize = f_size / sizeof(uint); #ifdef ASYNC hipHostMalloc((void**)&pt, f_size); hipHostMalloc((void**)&ct, f_size); #else pt = (uint*)malloc(f_size); ct = (uint *)malloc(f_size); #endif fread(pt, sizeof(uint), ptSize, f); fclose(f); AES *aes = new AES(); aes->makeKey(key, keySize << 3, DIR_ENCRYPT); /* uchar pt_debug[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf }; pt = (uint*)pt_debug; aes->encrypt_ecb(pt, ct, 4); printHexArray(ct, 4); */ aes->encrypt_ecb(pt, ct, ptSize); //printHexArray(ct, ptSize); return 0; } uint stringToUcharArray(char *str, uchar **array) { uint i, len = strlen(str) >> 1; *array = (uchar *)malloc(len * sizeof(uchar)); for(i=0; i<len; i++) sscanf(str + i*2, "%02X", *array+i); return len; } uint stringToUcharArray(char *str, uint **array) { uint i, len = strlen(str) >> 3; *array = (uint *)malloc(len * sizeof(uint)); for(i=0; i<len; i++) sscanf(str + i*8, "%08X", *array+i); return len; } void printHexArray(uint *array, uint size) { uint i; for(i=0; i<size; i++) printf("%08X", array[i]); printf("\n"); }
6a1d34f8c7a0c3b1964166abce9b1153985f612b.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "AES.h" #include "main.h" using namespace std; int main(int argc, char **argv) { if(argc < 2) { printf("USAGE: benchmark FILE\n"); return 1; } uchar key[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; uint keySize = 32; //uchar pt_debug[] = { 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c }; uint *ct, *pt; FILE *f = fopen(argv[1], "rb"); if(f == NULL) { printf("File not found.\n"); return 1; } fseek(f, 0, SEEK_END); uint f_size = ftell(f); rewind(f); if(f_size % 4*sizeof(uint) != 0) { printf("Plaintext size must be a multiple of AES block size.\n"); return 1; } uint ptSize = f_size / sizeof(uint); #ifdef ASYNC cudaMallocHost((void**)&pt, f_size); cudaMallocHost((void**)&ct, f_size); #else pt = (uint*)malloc(f_size); ct = (uint *)malloc(f_size); #endif fread(pt, sizeof(uint), ptSize, f); fclose(f); AES *aes = new AES(); aes->makeKey(key, keySize << 3, DIR_ENCRYPT); /* uchar pt_debug[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf }; pt = (uint*)pt_debug; aes->encrypt_ecb(pt, ct, 4); printHexArray(ct, 4); */ aes->encrypt_ecb(pt, ct, ptSize); //printHexArray(ct, ptSize); return 0; } uint stringToUcharArray(char *str, uchar **array) { uint i, len = strlen(str) >> 1; *array = (uchar *)malloc(len * sizeof(uchar)); for(i=0; i<len; i++) sscanf(str + i*2, "%02X", *array+i); return len; } uint stringToUcharArray(char *str, uint **array) { uint i, len = strlen(str) >> 3; *array = (uint *)malloc(len * sizeof(uint)); for(i=0; i<len; i++) sscanf(str + i*8, "%08X", *array+i); return len; } void printHexArray(uint *array, uint size) { uint i; for(i=0; i<size; i++) printf("%08X", array[i]); printf("\n"); }
e79878ce4cdf94f67603a77b8e11eb368b31ed1d.hip
// !!! This is a file automatically generated by hipify!!! // Simple SUDOKU probram in CUDA // cmpe297_hw3_easysudoku.cu #include<stdio.h> #include<string.h> #include <hip/hip_runtime.h> const int big_2x[9][9] = {{1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}}; // input 9x9 sudoku : // - 1~9 : valid values // - 0 : no value is decided const int input_sdk[9][9] = {{0, 7, 0, 0, 6, 5, 0, 8, 0}, {6, 0, 0, 0, 3, 0, 4, 0, 0}, {0, 2, 0, 0, 4, 0, 7, 0, 0}, {8, 6, 0, 0, 0, 2, 5, 7, 0}, {0, 0, 7, 4, 0, 6, 1, 0, 0}, {0, 5, 2, 3, 0, 0, 0, 6, 4}, {0, 0, 8, 0, 2, 0, 0, 3, 0}, {0, 0, 5, 0, 8, 0, 0, 0, 1}, {0, 4, 0, 7, 1, 0, 0, 5, 0}}; typedef struct { int val[9][9]; // values that each entry can get int num_options[9][9]; // number of values that each entry can get int not_in_cell[9][9]; // values not in each 3x3 cell int not_in_row[9][9]; // values not in each row int not_in_col[9][9]; // values not in each column } stContext; stContext context; void initialize_all(); void print_all(); #define WIDTH 9 #define IS_OPTION(row, col, k) \ ((context->not_in_row[row][k] == 1) && \ (context->not_in_col[col][k] == 1) && \ (context->not_in_cell[row/3+(col/3)*3][k] == 1))? 
1 : 0; // rule: numbers should be unique in each sub-array, each row, and each column __global__ void k_Sudoku(stContext *context) { const unsigned int col = threadIdx.x; const unsigned int row = threadIdx.y; // TODO: Insert Your Code Here } int main(int argc, char **argv) { hipError_t err; initialize_all(); print_all(); stContext *k_context; err = // TODO: Allocate matrix in GPU device memory if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device data (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = // TODO: Copy the input matrix to GPU if (err != hipSuccess) { fprintf(stderr, "Failed to copy data from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Assign as many threads as the matrix size so that // each thread can deal with one entry of the matrix dim3 dimBlock(WIDTH, WIDTH, 1); dim3 dimGrid(1, 1, 1); // TODO: Call the kernel function err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Kernel execution failed (error code %s)\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); err = // TODO: Copy the result matrix from the GPU device memory if (err != hipSuccess) { fprintf(stderr, "Failed to copy data from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); // Print the result print_all(); // Free the device memory err = hipFree(k_context); if (err != hipSuccess) { fprintf(stderr, "Failed to free gpu data (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } getchar(); return 0; } void initialize_all() { int i, j; memcpy(context.not_in_cell,big_2x, sizeof(big_2x)); memcpy(context.not_in_row,big_2x, sizeof(big_2x)); memcpy(context.not_in_col,big_2x, sizeof(big_2x)); for(i=0; i<9; i++){ for(j=0; j<9; j++){ if(input_sdk[i][j] == 0) { context.val[i][j] = 0; context.num_options[i][j]=9; } else { context.val[i][j] = input_sdk[i][j]; context.num_options[i][j] = 1; 
context.not_in_cell[i/3+(j/3)*3][input_sdk[i][j]-1] = 0; context.not_in_col[j][input_sdk[i][j]-1] = 0; context.not_in_row[i][input_sdk[i][j]-1] = 0; } } } } void print_all() { int i, j, k; for(i=0; i<9; i++){ for(j=0; j<9; j++){ if(context.val[i][j] == 0) fprintf(stdout, " %1d ", context.val[i][j]); else fprintf(stdout, " *%1d* ", context.val[i][j]); if((j==2)||(j==5)){ fprintf(stdout, "| "); } } fprintf(stdout, "\n"); if((i==2)||(i==5)){ for(k=0; k<69; k++){ fprintf(stdout, "-"); } fprintf(stdout, "\n"); } } fprintf(stdout, "\n"); }
e79878ce4cdf94f67603a77b8e11eb368b31ed1d.cu
// Simple SUDOKU probram in CUDA // cmpe297_hw3_easysudoku.cu #include<stdio.h> #include<string.h> #include <cuda_runtime.h> const int big_2x[9][9] = {{1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1}}; // input 9x9 sudoku : // - 1~9 : valid values // - 0 : no value is decided const int input_sdk[9][9] = {{0, 7, 0, 0, 6, 5, 0, 8, 0}, {6, 0, 0, 0, 3, 0, 4, 0, 0}, {0, 2, 0, 0, 4, 0, 7, 0, 0}, {8, 6, 0, 0, 0, 2, 5, 7, 0}, {0, 0, 7, 4, 0, 6, 1, 0, 0}, {0, 5, 2, 3, 0, 0, 0, 6, 4}, {0, 0, 8, 0, 2, 0, 0, 3, 0}, {0, 0, 5, 0, 8, 0, 0, 0, 1}, {0, 4, 0, 7, 1, 0, 0, 5, 0}}; typedef struct { int val[9][9]; // values that each entry can get int num_options[9][9]; // number of values that each entry can get int not_in_cell[9][9]; // values not in each 3x3 cell int not_in_row[9][9]; // values not in each row int not_in_col[9][9]; // values not in each column } stContext; stContext context; void initialize_all(); void print_all(); #define WIDTH 9 #define IS_OPTION(row, col, k) \ ((context->not_in_row[row][k] == 1) && \ (context->not_in_col[col][k] == 1) && \ (context->not_in_cell[row/3+(col/3)*3][k] == 1))? 
1 : 0; // rule: numbers should be unique in each sub-array, each row, and each column __global__ void k_Sudoku(stContext *context) { const unsigned int col = threadIdx.x; const unsigned int row = threadIdx.y; // TODO: Insert Your Code Here } int main(int argc, char **argv) { cudaError_t err; initialize_all(); print_all(); stContext *k_context; err = // TODO: Allocate matrix in GPU device memory if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device data (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = // TODO: Copy the input matrix to GPU if (err != cudaSuccess) { fprintf(stderr, "Failed to copy data from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Assign as many threads as the matrix size so that // each thread can deal with one entry of the matrix dim3 dimBlock(WIDTH, WIDTH, 1); dim3 dimGrid(1, 1, 1); // TODO: Call the kernel function err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Kernel execution failed (error code %s)\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaThreadSynchronize(); err = // TODO: Copy the result matrix from the GPU device memory if (err != cudaSuccess) { fprintf(stderr, "Failed to copy data from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaThreadSynchronize(); // Print the result print_all(); // Free the device memory err = cudaFree(k_context); if (err != cudaSuccess) { fprintf(stderr, "Failed to free gpu data (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } getchar(); return 0; } void initialize_all() { int i, j; memcpy(context.not_in_cell,big_2x, sizeof(big_2x)); memcpy(context.not_in_row,big_2x, sizeof(big_2x)); memcpy(context.not_in_col,big_2x, sizeof(big_2x)); for(i=0; i<9; i++){ for(j=0; j<9; j++){ if(input_sdk[i][j] == 0) { context.val[i][j] = 0; context.num_options[i][j]=9; } else { context.val[i][j] = input_sdk[i][j]; context.num_options[i][j] = 1; 
context.not_in_cell[i/3+(j/3)*3][input_sdk[i][j]-1] = 0; context.not_in_col[j][input_sdk[i][j]-1] = 0; context.not_in_row[i][input_sdk[i][j]-1] = 0; } } } } void print_all() { int i, j, k; for(i=0; i<9; i++){ for(j=0; j<9; j++){ if(context.val[i][j] == 0) fprintf(stdout, " %1d ", context.val[i][j]); else fprintf(stdout, " *%1d* ", context.val[i][j]); if((j==2)||(j==5)){ fprintf(stdout, "| "); } } fprintf(stdout, "\n"); if((i==2)||(i==5)){ for(k=0; k<69; k++){ fprintf(stdout, "-"); } fprintf(stdout, "\n"); } } fprintf(stdout, "\n"); }
d23aee9844b6c9ef84d46523abd8e2df2fb50a9b.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> #define WARP_SIZE 32 /////// CREATE THE ARGUMENT MATCHING ARRAY /////// ////////////////////////////////////////////////// /* Find how many arguments I share with all the other tid */ __global__ void find_likeness(int* input, int* likeness, int tid, int num_block, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i = 0; i < num_block; i++) { if (input[tid*num_block+i] == input[idx*num_block+i]) { likeness[idx]++; } } } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// //////////// MAIN ALGORITHM ///////////// ///////////////////////////////////////// void create_warp_map(int** input_array, int* result_array, int N, int num_block) { int totalBytes = sizeof(int) * N; // compute number of Blocks and Threads per block const int threadsPerBlock = 32; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; // DEVICE MEMORY int* device_likeness; // This is for the likeness of the threads hipMalloc((void**) &device_likeness, totalBytes); // 2-D array of the input values int* device_input_array; hipMalloc((void**)&device_input_array, num_block*totalBytes); hipMemcpy(device_input_array, input_array, num_block*totalBytes, hipMemcpyHostToDevice); // Host Memory std::vector<bool> visited(N); // This is for if a node is already assigned a warp visited.clear(); int num_mapped = 0; // How many have been mapped int cur_index = 0; // First tid in the current warp // Initiliaze our first tid in our first warp visited[cur_index] = true; result_array[num_mapped] = cur_index; num_mapped++; // Until I have filled all of my output array while (num_mapped < N) { hipMemset(device_likeness, 0, totalBytes); // Find argument 
matches based on an index hipLaunchKernelGGL(( find_likeness), dim3(blocks), dim3(threadsPerBlock), 0, 0, device_input_array, device_likeness, cur_index, num_block, N); hipDeviceSynchronize(); // Sort the index thrust array based on the value thrust array thrust::device_ptr<int> index = thrust::device_malloc<int>(N); thrust::sequence(index, index + N); thrust::device_ptr<int> ptr_likeness(device_likeness); thrust::sort_by_key(ptr_likeness, ptr_likeness + N, index, thrust::greater<int>()); /* FOR DEBUGGING thrust::host_vector<int> idx(index, index + N); thrust::host_vector<int> like(ptr_likeness, ptr_likeness + N); for (int i = 0; i < N; i++) { printf("%d %d\n", idx[i], like[i]); } */ // Loop through the sorted list of arg match and pick the best 31 for (int i = 0; i < N; i++) { // After sorting grab the 32 who arent visited yet int pos_index = index[i]; // If its not already in a warp then put it in this warp if (visited[pos_index] == false) { visited[pos_index] = true; result_array[num_mapped] = pos_index; num_mapped++; } // If this warp has been filled move on if ((num_mapped % WARP_SIZE) == 0) { break; } } // Start a new warp for (int i = 0; i < N; i++) { // Find the first index with visited == false if (visited[i] == false) { cur_index = i; visited[cur_index] = true; result_array[num_mapped] = cur_index; num_mapped++; break; } } } } ///////////////////////////////////////// /////////////////////////////////////////
d23aee9844b6c9ef84d46523abd8e2df2fb50a9b.cu
#include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <iostream> #define WARP_SIZE 32 /////// CREATE THE ARGUMENT MATCHING ARRAY /////// ////////////////////////////////////////////////// /* Find how many arguments I share with all the other tid */ __global__ void find_likeness(int* input, int* likeness, int tid, int num_block, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i = 0; i < num_block; i++) { if (input[tid*num_block+i] == input[idx*num_block+i]) { likeness[idx]++; } } } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// //////////// MAIN ALGORITHM ///////////// ///////////////////////////////////////// void create_warp_map(int** input_array, int* result_array, int N, int num_block) { int totalBytes = sizeof(int) * N; // compute number of Blocks and Threads per block const int threadsPerBlock = 32; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; // DEVICE MEMORY int* device_likeness; // This is for the likeness of the threads cudaMalloc((void**) &device_likeness, totalBytes); // 2-D array of the input values int* device_input_array; cudaMalloc((void**)&device_input_array, num_block*totalBytes); cudaMemcpy(device_input_array, input_array, num_block*totalBytes, cudaMemcpyHostToDevice); // Host Memory std::vector<bool> visited(N); // This is for if a node is already assigned a warp visited.clear(); int num_mapped = 0; // How many have been mapped int cur_index = 0; // First tid in the current warp // Initiliaze our first tid in our first warp visited[cur_index] = true; result_array[num_mapped] = cur_index; num_mapped++; // Until I have filled all of my output array while (num_mapped < N) { cudaMemset(device_likeness, 0, totalBytes); // Find argument matches based on an index find_likeness<<<blocks, 
threadsPerBlock>>>(device_input_array, device_likeness, cur_index, num_block, N); cudaDeviceSynchronize(); // Sort the index thrust array based on the value thrust array thrust::device_ptr<int> index = thrust::device_malloc<int>(N); thrust::sequence(index, index + N); thrust::device_ptr<int> ptr_likeness(device_likeness); thrust::sort_by_key(ptr_likeness, ptr_likeness + N, index, thrust::greater<int>()); /* FOR DEBUGGING thrust::host_vector<int> idx(index, index + N); thrust::host_vector<int> like(ptr_likeness, ptr_likeness + N); for (int i = 0; i < N; i++) { printf("%d %d\n", idx[i], like[i]); } */ // Loop through the sorted list of arg match and pick the best 31 for (int i = 0; i < N; i++) { // After sorting grab the 32 who arent visited yet int pos_index = index[i]; // If its not already in a warp then put it in this warp if (visited[pos_index] == false) { visited[pos_index] = true; result_array[num_mapped] = pos_index; num_mapped++; } // If this warp has been filled move on if ((num_mapped % WARP_SIZE) == 0) { break; } } // Start a new warp for (int i = 0; i < N; i++) { // Find the first index with visited == false if (visited[i] == false) { cur_index = i; visited[cur_index] = true; result_array[num_mapped] = cur_index; num_mapped++; break; } } } } ///////////////////////////////////////// /////////////////////////////////////////
0a97a3b7960b2bcb86311232591f50b2d3aa13f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_reduce_min.h" namespace anakin { namespace saber { /** * @brief reduce tensor acorrding to the given reduce dim. * e.g. * input tensor with shape [5, 2, 10, 4] (rank = 4, how many dimentions does a tensor have.) * and the reduce dim may have the following forms: * 1) reduce_dim = None, no reduce dim. It means that reduce all dimentions [default] * output's shape [1, 1, 1, 1]. * 2) reduce_dim = x, x is the dimention we want to reduce. * output's shape: * x = 0, for example, the shape will be [1, 2, 10, 4] if keep_dim is true, otherwise it will be [2*10*4, 1, 1, 1]. * x = 2, for example, the shape will be [5, 2, 1, 4] if keep_dim is true, otherwise it will be [5*2*4, 1, 1, 1]. * and so on. * 3) reduce_dim = [x, y], It will reduce two dimetions x and y. * output's shape: * reduce_dim = [0, 1], for example, the shape will be [1, 1, 10 ,4] or [10*4, 1, 1, 1] and so on. * Notes: * if reduce_dim[i] < 0: * do * reduce_dim[i] += rank. * * @tparam OpDtype * @param inputs * @param outputs * @param param * @return SaberStatus */ //This function is used to implement atioMin based on CAS function. 
// __device__ float atomicMin(float* address, float val) { // unsigned long long int* address_as_ull = (unsigned long long int*)address; // unsigned long long int old = *address_as_ull, assumed; // do{ // assumed = old; // old = atomicCAS(address_as_ull, assumed, __float_as_longlong( // fminf(val, __longlong_as_float(assumed)))); // }while(assumed != old); // return __longlong_as_float(old); // } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do{ assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong( fmin(val, __longlong_as_double(assumed)))); }while(assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do{ assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int( fminf(val, __int_as_float(assumed)))); }while(assumed != old); return __longlong_as_double(old); } //thread num: CHW template <typename dtype> __global__ void kernel_reduce_n(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; //HW int size = channel_in * feature_map;// CHW int c_id = tid / feature_map; int feature_map_inner_index = tid % feature_map; dtype min = src[tid]; for (int n = 1; n < num_in; ++n) { dtype tmp = src[n * size + c_id * feature_map + feature_map_inner_index]; min = tmp < min ? 
tmp : min; } dst[tid] = min; } //thread num:NHW template <typename dtype> __global__ void kernel_reduce_c(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; int size = channel_in * feature_map; for (int i = tid; i < count; i += thread_num) { int n_id = i / feature_map; int inner_index = i % feature_map; dtype min = src[n_id * size + inner_index]; for (int c = 1; c < channel_in; ++c) { dtype tmp = src[n_id * size + c * feature_map + inner_index]; min = tmp < min? tmp : min; } dst[n_id * feature_map + inner_index] = min; // Is data_index same to tid/i?. } } //thread num: NCW template <typename dtype> __global__ void kernel_reduce_h(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; //CW int cw_size = channel_in * width_in; //CW int size = channel_in * feature_map; //CHW for (int i = tid; i < count; i += thread_num) { int n_id = i / cw_size; int c_id = (i / width_in) % channel_in; int inner_index = i % width_in; int data_index = n_id * size + c_id * feature_map + inner_index; dtype min = src[data_index]; for (int h = 1; h < height_in; ++h) { dtype tmp = src[data_index + h * width_in]; min = tmp < min? tmp : min; } dst[n_id * cw_size + c_id * width_in + inner_index] = min; // Is data_index same to tid/i?. 
} } //thread num: NCH template <typename dtype> __global__ void kernel_reduce_w(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int ch_size = channel_in * height_in; //CH int size = ch_size * width_in; //CHW int feature_map = height_in * width_in; //HW for (int i = tid; i < count; i += thread_num) { int n_id = i / ch_size; int c_id = (i / height_in) % channel_in; int inner_index = i % height_in; int data_index = n_id * size + c_id * feature_map + inner_index * width_in; dtype min = src[data_index]; for (int w = 1; w < width_in; ++w) { dtype tmp = src[data_index + w]; min = tmp < min? tmp : min; } dst[n_id * ch_size + c_id * height_in + inner_index] = min; } } //reduce all. template <typename dtype> __global__ void kernel_reduce_nchw(const dtype* src, dtype* dst, const int count) { int n_id = threadIdx.x + blockIdx.x * blockDim.x; int tid = threadIdx.x; int thread_num = blockDim.x * gridDim.x; dst[0] = src[n_id]; extern __shared__ dtype s[]; dtype min = src[n_id]; for (int i = n_id; i < count; i += thread_num) { min = src[i] < min ? src[i] : min; } s[tid] = min; __syncthreads(); int powOf2 = blockDim.x; if (powOf2 & (powOf2 - 1)) { //block threads are not pow of 2. while (powOf2 & (powOf2 - 1)) { powOf2 &= powOf2 - 1; } // it'll end when it find pow of 2. if (tid >= powOf2) { s[tid - powOf2] = s[tid - powOf2] < s[tid]? s[tid - powOf2] : s[tid]; } __syncthreads(); } for (int i = powOf2>>1; i > 0; i>>=1) { if (tid < i) { s[tid] = s[tid] < s[tid + i]? 
s[tid] : s[tid + i]; } __syncthreads(); } if (threadIdx.x == 0) { //double tmp = s[] atomicMin(&dst[0], s[threadIdx.x]); } } template <DataType OpDtype> SaberStatus SaberReduceMin<NV, OpDtype>::dispatch(const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, ReduceMinParam<NV>& param) { hipStream_t cuda_stream = this->_ctx->get_compute_stream(); const OpDataType* input_ptr = (const OpDataType*)inputs[0]->data(); OpDataType* output_ptr = (OpDataType*)outputs[0]->mutable_data(); int count = outputs[0]->valid_size(); if (_reduce_dim.empty()) { // reduce_all int count_all = inputs[0]->valid_size(); int grid, thread_num; if (count_all < CUDA_NUM_THREADS) { thread_num = count_all; grid = 1; }else { thread_num = CUDA_NUM_THREADS; if (CUDA_GET_BLOCKS(count) >= 128) //This is to avoid share memory blowing up. grid = 64; else grid = CUDA_GET_BLOCKS(count); } int sharedSize = thread_num * 4; hipLaunchKernelGGL(( kernel_reduce_nchw<OpDataType>), dim3(grid), dim3(thread_num), sharedSize, cuda_stream, input_ptr, output_ptr, count_all); }else if (_reduce_dim.size() == 1) { if (_reduce_dim[0] == 0) { //reduce n hipLaunchKernelGGL(( kernel_reduce_n<OpDataType>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 1) { //reduce c hipLaunchKernelGGL(( kernel_reduce_c<OpDataType>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 2) { //reduce h hipLaunchKernelGGL(( kernel_reduce_h<OpDataType>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 3) { //reduce h hipLaunchKernelGGL(( kernel_reduce_w<OpDataType>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, input_ptr, output_ptr, _num, _channel, _height, _width, count); } } 
else if (_reduce_dim.size() == 2) { //only consecutive reduce dim? [0,1] [1, 2], not [0, 2]? if (_reduce_dim[0] == 0 && _reduce_dim[1] == 1) { //reduce n, c. reduce n first. _tensor_tmp.reshape(std::vector<int>({1, _channel, _height, _width})); int count_n = _tensor_tmp.valid_size(); int count_nc = count_n / _tensor_tmp.channel(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); hipLaunchKernelGGL(( kernel_reduce_n<OpDataType>), dim3(CUDA_GET_BLOCKS(count_n)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, input_ptr, tmp_out, _num, _channel, _height, _width, count_n); hipLaunchKernelGGL(( kernel_reduce_c<OpDataType>), dim3(CUDA_GET_BLOCKS(count_nc)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, tmp_out, output_ptr, 1, _channel, _height, _width, count_nc); }else if (_reduce_dim[0] == 1 && _reduce_dim[1] == 2) { //reduce c. h. reduce c first. _tensor_tmp.reshape(std::vector<int>({_num, 1, _height, _width})); int count_c = _tensor_tmp.valid_size(); int count_ch = count_c / _tensor_tmp.height(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); hipLaunchKernelGGL(( kernel_reduce_c<OpDataType>), dim3(CUDA_GET_BLOCKS(count_c)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, input_ptr, tmp_out, _num, _channel, _height, _width, count_c); hipLaunchKernelGGL(( kernel_reduce_h<OpDataType>), dim3(CUDA_GET_BLOCKS(count_ch)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, tmp_out, output_ptr, _num, 1, _height, _width, count_ch); }else if (_reduce_dim[0] == 2 && _reduce_dim[1] == 3) { //reduce h, w. reduce h first. 
_tensor_tmp.reshape(std::vector<int>({_num, _channel, 1, _width})); int count_h = _tensor_tmp.valid_size(); int count_hw = count_h / _tensor_tmp.width(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); hipLaunchKernelGGL(( kernel_reduce_h<OpDataType>), dim3(CUDA_GET_BLOCKS(count_h)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, input_ptr, tmp_out, _num, _channel, _height, _width, count_h); hipLaunchKernelGGL(( kernel_reduce_w<OpDataType>), dim3(CUDA_GET_BLOCKS(count_hw)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, tmp_out, output_ptr, _num, _channel, 1, _width, count_hw); }else { LOG(FATAL) <<"[reduce_min] invalid reduce_dim!!!"; } }else { LOG(FATAL) << "[reduce_min]Reducing size over than 2 is not support!!"; } CUDA_POST_KERNEL_CHECK; return SaberSuccess; } template class SaberReduceMin<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberReduceMin, ReduceMinParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberReduceMin, ReduceMinParam, NV, AK_INT8); } // namespace saber. } // namespace anakin.
0a97a3b7960b2bcb86311232591f50b2d3aa13f8.cu
#include "saber/funcs/impl/cuda/saber_reduce_min.h" namespace anakin { namespace saber { /** * @brief reduce tensor acorrding to the given reduce dim. * e.g. * input tensor with shape [5, 2, 10, 4] (rank = 4, how many dimentions does a tensor have.) * and the reduce dim may have the following forms: * 1) reduce_dim = None, no reduce dim. It means that reduce all dimentions [default] * output's shape [1, 1, 1, 1]. * 2) reduce_dim = x, x is the dimention we want to reduce. * output's shape: * x = 0, for example, the shape will be [1, 2, 10, 4] if keep_dim is true, otherwise it will be [2*10*4, 1, 1, 1]. * x = 2, for example, the shape will be [5, 2, 1, 4] if keep_dim is true, otherwise it will be [5*2*4, 1, 1, 1]. * and so on. * 3) reduce_dim = [x, y], It will reduce two dimetions x and y. * output's shape: * reduce_dim = [0, 1], for example, the shape will be [1, 1, 10 ,4] or [10*4, 1, 1, 1] and so on. * Notes: * if reduce_dim[i] < 0: * do * reduce_dim[i] += rank. * * @tparam OpDtype * @param inputs * @param outputs * @param param * @return SaberStatus */ //This function is used to implement atioMin based on CAS function. 
// __device__ float atomicMin(float* address, float val) { // unsigned long long int* address_as_ull = (unsigned long long int*)address; // unsigned long long int old = *address_as_ull, assumed; // do{ // assumed = old; // old = atomicCAS(address_as_ull, assumed, __float_as_longlong( // fminf(val, __longlong_as_float(assumed)))); // }while(assumed != old); // return __longlong_as_float(old); // } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do{ assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong( fmin(val, __longlong_as_double(assumed)))); }while(assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do{ assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int( fminf(val, __int_as_float(assumed)))); }while(assumed != old); return __longlong_as_double(old); } //thread num: CHW template <typename dtype> __global__ void kernel_reduce_n(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; //HW int size = channel_in * feature_map;// CHW int c_id = tid / feature_map; int feature_map_inner_index = tid % feature_map; dtype min = src[tid]; for (int n = 1; n < num_in; ++n) { dtype tmp = src[n * size + c_id * feature_map + feature_map_inner_index]; min = tmp < min ? 
tmp : min; } dst[tid] = min; } //thread num:NHW template <typename dtype> __global__ void kernel_reduce_c(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; int size = channel_in * feature_map; for (int i = tid; i < count; i += thread_num) { int n_id = i / feature_map; int inner_index = i % feature_map; dtype min = src[n_id * size + inner_index]; for (int c = 1; c < channel_in; ++c) { dtype tmp = src[n_id * size + c * feature_map + inner_index]; min = tmp < min? tmp : min; } dst[n_id * feature_map + inner_index] = min; // Is data_index same to tid/i?. } } //thread num: NCW template <typename dtype> __global__ void kernel_reduce_h(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; //CW int cw_size = channel_in * width_in; //CW int size = channel_in * feature_map; //CHW for (int i = tid; i < count; i += thread_num) { int n_id = i / cw_size; int c_id = (i / width_in) % channel_in; int inner_index = i % width_in; int data_index = n_id * size + c_id * feature_map + inner_index; dtype min = src[data_index]; for (int h = 1; h < height_in; ++h) { dtype tmp = src[data_index + h * width_in]; min = tmp < min? tmp : min; } dst[n_id * cw_size + c_id * width_in + inner_index] = min; // Is data_index same to tid/i?. 
} } //thread num: NCH template <typename dtype> __global__ void kernel_reduce_w(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int ch_size = channel_in * height_in; //CH int size = ch_size * width_in; //CHW int feature_map = height_in * width_in; //HW for (int i = tid; i < count; i += thread_num) { int n_id = i / ch_size; int c_id = (i / height_in) % channel_in; int inner_index = i % height_in; int data_index = n_id * size + c_id * feature_map + inner_index * width_in; dtype min = src[data_index]; for (int w = 1; w < width_in; ++w) { dtype tmp = src[data_index + w]; min = tmp < min? tmp : min; } dst[n_id * ch_size + c_id * height_in + inner_index] = min; } } //reduce all. template <typename dtype> __global__ void kernel_reduce_nchw(const dtype* src, dtype* dst, const int count) { int n_id = threadIdx.x + blockIdx.x * blockDim.x; int tid = threadIdx.x; int thread_num = blockDim.x * gridDim.x; dst[0] = src[n_id]; extern __shared__ dtype s[]; dtype min = src[n_id]; for (int i = n_id; i < count; i += thread_num) { min = src[i] < min ? src[i] : min; } s[tid] = min; __syncthreads(); int powOf2 = blockDim.x; if (powOf2 & (powOf2 - 1)) { //block threads are not pow of 2. while (powOf2 & (powOf2 - 1)) { powOf2 &= powOf2 - 1; } // it'll end when it find pow of 2. if (tid >= powOf2) { s[tid - powOf2] = s[tid - powOf2] < s[tid]? s[tid - powOf2] : s[tid]; } __syncthreads(); } for (int i = powOf2>>1; i > 0; i>>=1) { if (tid < i) { s[tid] = s[tid] < s[tid + i]? 
s[tid] : s[tid + i]; } __syncthreads(); } if (threadIdx.x == 0) { //double tmp = s[] atomicMin(&dst[0], s[threadIdx.x]); } } template <DataType OpDtype> SaberStatus SaberReduceMin<NV, OpDtype>::dispatch(const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, ReduceMinParam<NV>& param) { cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); const OpDataType* input_ptr = (const OpDataType*)inputs[0]->data(); OpDataType* output_ptr = (OpDataType*)outputs[0]->mutable_data(); int count = outputs[0]->valid_size(); if (_reduce_dim.empty()) { // reduce_all int count_all = inputs[0]->valid_size(); int grid, thread_num; if (count_all < CUDA_NUM_THREADS) { thread_num = count_all; grid = 1; }else { thread_num = CUDA_NUM_THREADS; if (CUDA_GET_BLOCKS(count) >= 128) //This is to avoid share memory blowing up. grid = 64; else grid = CUDA_GET_BLOCKS(count); } int sharedSize = thread_num * 4; kernel_reduce_nchw<OpDataType><<<grid, thread_num, sharedSize, cuda_stream>>>( input_ptr, output_ptr, count_all); }else if (_reduce_dim.size() == 1) { if (_reduce_dim[0] == 0) { //reduce n kernel_reduce_n<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 1) { //reduce c kernel_reduce_c<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 2) { //reduce h kernel_reduce_h<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 3) { //reduce h kernel_reduce_w<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } } else if (_reduce_dim.size() == 2) { //only consecutive reduce dim? [0,1] [1, 2], not [0, 2]? 
if (_reduce_dim[0] == 0 && _reduce_dim[1] == 1) { //reduce n, c. reduce n first. _tensor_tmp.reshape(std::vector<int>({1, _channel, _height, _width})); int count_n = _tensor_tmp.valid_size(); int count_nc = count_n / _tensor_tmp.channel(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); kernel_reduce_n<OpDataType><<<CUDA_GET_BLOCKS(count_n), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, tmp_out, _num, _channel, _height, _width, count_n); kernel_reduce_c<OpDataType><<<CUDA_GET_BLOCKS(count_nc), CUDA_NUM_THREADS, 0, cuda_stream>>>( tmp_out, output_ptr, 1, _channel, _height, _width, count_nc); }else if (_reduce_dim[0] == 1 && _reduce_dim[1] == 2) { //reduce c. h. reduce c first. _tensor_tmp.reshape(std::vector<int>({_num, 1, _height, _width})); int count_c = _tensor_tmp.valid_size(); int count_ch = count_c / _tensor_tmp.height(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); kernel_reduce_c<OpDataType><<<CUDA_GET_BLOCKS(count_c), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, tmp_out, _num, _channel, _height, _width, count_c); kernel_reduce_h<OpDataType><<<CUDA_GET_BLOCKS(count_ch), CUDA_NUM_THREADS, 0, cuda_stream>>>( tmp_out, output_ptr, _num, 1, _height, _width, count_ch); }else if (_reduce_dim[0] == 2 && _reduce_dim[1] == 3) { //reduce h, w. reduce h first. 
_tensor_tmp.reshape(std::vector<int>({_num, _channel, 1, _width})); int count_h = _tensor_tmp.valid_size(); int count_hw = count_h / _tensor_tmp.width(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); kernel_reduce_h<OpDataType><<<CUDA_GET_BLOCKS(count_h), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, tmp_out, _num, _channel, _height, _width, count_h); kernel_reduce_w<OpDataType><<<CUDA_GET_BLOCKS(count_hw), CUDA_NUM_THREADS, 0, cuda_stream>>>( tmp_out, output_ptr, _num, _channel, 1, _width, count_hw); }else { LOG(FATAL) <<"[reduce_min] invalid reduce_dim!!!"; } }else { LOG(FATAL) << "[reduce_min]Reducing size over than 2 is not support!!"; } CUDA_POST_KERNEL_CHECK; return SaberSuccess; } template class SaberReduceMin<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberReduceMin, ReduceMinParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberReduceMin, ReduceMinParam, NV, AK_INT8); } // namespace saber. } // namespace anakin.
bae6360048e7196211c04825ba22989d8253d284.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> //////////////////////////////////////////////////////////////////////////////// // GPU-specific defines //////////////////////////////////////////////////////////////////////////////// //Maps to a single instruction on G8x / G9x / G10x #define IMAD(a, b, c) ( __mul24((a), (b)) + (c) ) //Use unrolled innermost convolution loop #define UNROLL_INNER 1 //Round a / b to nearest higher integer value inline int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Align a to nearest higher multiple of b inline int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } texture<float, 2, hipReadModeElementType> texSrc; texture<float, 2, hipReadModeElementType> texSrc2; //////////////////////////////////////////////////////////////////////////////// // Slinding window //////////////////////////////////////////////////////////////////////////////// __global__ void slidingWindowKernel( float *d_Dst, int matrixW, int matrixH, int KERNEL_LENGTH1, int stride ) { const int ix = stride -1 + IMAD(blockDim.x, blockIdx.x, threadIdx.x) * stride; const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if (ix >= matrixW - KERNEL_LENGTH1 || ix < KERNEL_LENGTH1 || iy >= matrixH) { return; } float sum = 0; for (int k = 0; k < KERNEL_LENGTH1; k++) { sum += (tex2D(texSrc, x - (float)k, y) - tex2D(texSrc2, (float)(KERNEL_LENGTH1 - k - 1), y))*(tex2D(texSrc, x - (float)k, y) - tex2D(texSrc2, (float)(KERNEL_LENGTH1 - k - 1), y)); } d_Dst[IMAD(iy, matrixW, ix)] = 1.0/(1e-6+sum/float(KERNEL_LENGTH1)); } extern "C" void slidingWindowGPU( float *d_Dst, hipArray *a_Src, hipArray *b_Src, int matrixW, int matrixH, int KERNEL_LENGTH1, int stride ) { dim3 threads(16, 12); dim3 blocks(iDivUp(matrixW, threads.x), iDivUp(matrixH, threads.y)); hipBindTextureToArray(texSrc, a_Src); 
hipBindTextureToArray(texSrc2, b_Src); hipLaunchKernelGGL(( slidingWindowKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, matrixW, matrixH, KERNEL_LENGTH1, stride ); hipUnbindTexture(texSrc); hipUnbindTexture(texSrc2); } extern "C" { void cuda_distance(float *a, float *h_OutputGPU, size_t W, size_t H, size_t KL1, size_t strd) { float *h_Input, *h_Input2; hipArray *a_Src; hipArray *b_Src; hipChannelFormatDesc floatTex = hipCreateChannelDesc<float>(); float *d_Output; const int matrixW = W; const int matrixH = H; const int KERNEL_LENGTH1 = KL1; const int stride = strd; h_Input = (float *)malloc(matrixW* matrixH * sizeof(float)); h_Input2 = (float *)malloc(KERNEL_LENGTH1 * matrixH * sizeof(float)); hipMallocArray(&a_Src, &floatTex, matrixW, matrixH); hipMallocArray(&b_Src, &floatTex, KERNEL_LENGTH1, matrixH); hipMalloc((void **)&d_Output, matrixW * matrixH * sizeof(float)); unsigned int w=0; for (unsigned int j = 0; j < matrixH; j++) { for (unsigned int i = 0; i < matrixW; i++) { h_Input[w] = a[i+j*matrixW]; w=w+1; } } w=0; for (unsigned int j = 0; j < matrixH; j++) { for (unsigned int i = matrixW-KERNEL_LENGTH1; i < matrixW; i++) { h_Input2[w] = a[i+j*matrixW]; w=w+1; } } hipMemcpyToArray(a_Src, 0, 0, h_Input, matrixW * matrixH * sizeof(float), hipMemcpyHostToDevice); hipMemcpyToArray(b_Src, 0, 0, h_Input2, KERNEL_LENGTH1 * matrixH * sizeof(float), hipMemcpyHostToDevice); hipDeviceSynchronize(); slidingWindowGPU( d_Output, a_Src, b_Src, matrixW, matrixH, KERNEL_LENGTH1, stride ); hipDeviceSynchronize(); hipMemcpy(h_OutputGPU, d_Output, matrixW * matrixH * sizeof(float), hipMemcpyDeviceToHost); hipFree(d_Output); hipFreeArray(a_Src); hipFreeArray(b_Src); free(h_Input); free(h_Input2); } }
bae6360048e7196211c04825ba22989d8253d284.cu
#include <cuda.h> #include <cuda_runtime_api.h> //////////////////////////////////////////////////////////////////////////////// // GPU-specific defines //////////////////////////////////////////////////////////////////////////////// //Maps to a single instruction on G8x / G9x / G10x #define IMAD(a, b, c) ( __mul24((a), (b)) + (c) ) //Use unrolled innermost convolution loop #define UNROLL_INNER 1 //Round a / b to nearest higher integer value inline int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Align a to nearest higher multiple of b inline int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } texture<float, 2, cudaReadModeElementType> texSrc; texture<float, 2, cudaReadModeElementType> texSrc2; //////////////////////////////////////////////////////////////////////////////// // Slinding window //////////////////////////////////////////////////////////////////////////////// __global__ void slidingWindowKernel( float *d_Dst, int matrixW, int matrixH, int KERNEL_LENGTH1, int stride ) { const int ix = stride -1 + IMAD(blockDim.x, blockIdx.x, threadIdx.x) * stride; const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if (ix >= matrixW - KERNEL_LENGTH1 || ix < KERNEL_LENGTH1 || iy >= matrixH) { return; } float sum = 0; for (int k = 0; k < KERNEL_LENGTH1; k++) { sum += (tex2D(texSrc, x - (float)k, y) - tex2D(texSrc2, (float)(KERNEL_LENGTH1 - k - 1), y))*(tex2D(texSrc, x - (float)k, y) - tex2D(texSrc2, (float)(KERNEL_LENGTH1 - k - 1), y)); } d_Dst[IMAD(iy, matrixW, ix)] = 1.0/(1e-6+sum/float(KERNEL_LENGTH1)); } extern "C" void slidingWindowGPU( float *d_Dst, cudaArray *a_Src, cudaArray *b_Src, int matrixW, int matrixH, int KERNEL_LENGTH1, int stride ) { dim3 threads(16, 12); dim3 blocks(iDivUp(matrixW, threads.x), iDivUp(matrixH, threads.y)); cudaBindTextureToArray(texSrc, a_Src); cudaBindTextureToArray(texSrc2, b_Src); slidingWindowKernel<<<blocks, 
threads>>>( d_Dst, matrixW, matrixH, KERNEL_LENGTH1, stride ); cudaUnbindTexture(texSrc); cudaUnbindTexture(texSrc2); } extern "C" { void cuda_distance(float *a, float *h_OutputGPU, size_t W, size_t H, size_t KL1, size_t strd) { float *h_Input, *h_Input2; cudaArray *a_Src; cudaArray *b_Src; cudaChannelFormatDesc floatTex = cudaCreateChannelDesc<float>(); float *d_Output; const int matrixW = W; const int matrixH = H; const int KERNEL_LENGTH1 = KL1; const int stride = strd; h_Input = (float *)malloc(matrixW* matrixH * sizeof(float)); h_Input2 = (float *)malloc(KERNEL_LENGTH1 * matrixH * sizeof(float)); cudaMallocArray(&a_Src, &floatTex, matrixW, matrixH); cudaMallocArray(&b_Src, &floatTex, KERNEL_LENGTH1, matrixH); cudaMalloc((void **)&d_Output, matrixW * matrixH * sizeof(float)); unsigned int w=0; for (unsigned int j = 0; j < matrixH; j++) { for (unsigned int i = 0; i < matrixW; i++) { h_Input[w] = a[i+j*matrixW]; w=w+1; } } w=0; for (unsigned int j = 0; j < matrixH; j++) { for (unsigned int i = matrixW-KERNEL_LENGTH1; i < matrixW; i++) { h_Input2[w] = a[i+j*matrixW]; w=w+1; } } cudaMemcpyToArray(a_Src, 0, 0, h_Input, matrixW * matrixH * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyToArray(b_Src, 0, 0, h_Input2, KERNEL_LENGTH1 * matrixH * sizeof(float), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); slidingWindowGPU( d_Output, a_Src, b_Src, matrixW, matrixH, KERNEL_LENGTH1, stride ); cudaDeviceSynchronize(); cudaMemcpy(h_OutputGPU, d_Output, matrixW * matrixH * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_Output); cudaFreeArray(a_Src); cudaFreeArray(b_Src); free(h_Input); free(h_Input2); } }
459cf184e95df767ced8e08b5fd047671dc08563.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime_api.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include <hip/hip_runtime_api.h> #include "writeToCSVFileHeader.h" #include "userInputHeader.h" void printArray(int *elements); int deviceBlocks; int threadsPerBlock; int elementsToSort; int threadCount; //Max times we cann run the process int executionCount; const int randMax = 10000; void createUnsortedArray(int* elements){ //Get size and cuda dimentions from user input for (int i = 0; i < elementsToSort; ++i){ elements[i] = rand() % randMax - rand() % 5; } } bool isSorted(int *elements){ bool sorted = true; for (int i = 0; i < (elementsToSort - 1); ++i){ if (elements[i] > elements[i + 1]){ sorted = false; } } return sorted; } double getElapsedTime(clock_t start, clock_t stop) { double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.3fs\n", elapsed); return elapsed; } int random_int() { return (int)rand() / (int)2048; } /* We get our indexes to swap buy xoring our thread index with the step. 
This essentially wraps the thread Id round the step value meaning the only values porduced that are bigger than the Thread Id will be those within the requied step length */ __global__ void stepskernel(int *dev_values, int step, int phaseLength) { unsigned int firstIndex, XoredSecondIndex; //Set it to the thread Id firstIndex = threadIdx.x + blockDim.x * blockIdx.x; XoredSecondIndex = firstIndex ^ step; //Threads i corrasponding to the desired bitonic element will be used for the swap if ((XoredSecondIndex)>firstIndex) { if ((firstIndex&phaseLength) == 0) { if (dev_values[firstIndex]>dev_values[XoredSecondIndex]) { int temp = dev_values[firstIndex]; dev_values[firstIndex] = dev_values[XoredSecondIndex]; dev_values[XoredSecondIndex] = temp; } } if ((firstIndex&phaseLength) != 0) { if (dev_values[firstIndex]<dev_values[XoredSecondIndex]) { int temp = dev_values[firstIndex]; dev_values[firstIndex] = dev_values[XoredSecondIndex]; dev_values[XoredSecondIndex] = temp; } } } } /* Main function call. Created array and calls stepskernel based of the size of the bitonic sequences and step. 
*/ void bitonic_sort(int *values) { int *dev_values; size_t size = elementsToSort* sizeof(int); //Allocate memory on the device then copy our host array to device pointer hipMalloc((void**)&dev_values, size); hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice); dim3 blocks(deviceBlocks, 1); dim3 threads(threadsPerBlock, 1); int step, phaseLength; for (phaseLength = 2; phaseLength <= elementsToSort; phaseLength <<= 1) { for (step = phaseLength >> 1; step>0; step = step >> 1) { stepskernel << <blocks, threads >> >(dev_values, step, phaseLength); } } hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost); hipFree(dev_values); } void preExecution(){ int values[7]; values[0] = 10; values[1] = 13; values[2] = 9; values[3] = 18; values[4] = 26; values[4] = 100; values[6] = 3; bitonic_sort(values); } int main(void) { executionCount = getMaxProcessCount(); int fixedExecutionCount = executionCount; preExecution(); bool runSort = true; runSort = runSortAgain(); //Pointers to store our results that we're writing to CSV files, allocate space entered buy the user int* threadCounts = (int*)malloc(executionCount*sizeof(int)); int* allBlocks = (int*)malloc(executionCount*sizeof(int));; double* timeResults = (double*)malloc(executionCount*sizeof(double));; char* arrayStates = (char*)malloc(executionCount*sizeof(char)); double time; clock_t start, stop; //Counter so we can assine values to the array in the execution loop while (runSort && executionCount != 0){ //Get thread, blocks and element count //Get total elements and suggested block thread configurations blockAndThreadCounts inputCountandSuggestedThreadBlockCount; inputCountandSuggestedThreadBlockCount = getElementCounts(); elementsToSort = inputCountandSuggestedThreadBlockCount.elementCount; //wirte possible thread and block configurations to text file printf("Writing suggested block thread configuration..."); writeSuggestedBlockThreadConfigToCsv(inputCountandSuggestedThreadBlockCount.threadCounts, 
inputCountandSuggestedThreadBlockCount.blockCounts, inputCountandSuggestedThreadBlockCount.combinationsCount ); printf("Done \n"); //elementsToSort = inputCountandSuggestedThreadBlockCount.elementCount; deviceBlocks = getBlockCount(); threadsPerBlock = getThreadCount(); threadCount = threadsPerBlock * deviceBlocks; //Malloc array, add values to it and write unsorted array to csv file int* values = (int*)malloc(elementsToSort*sizeof(int)); createUnsortedArray(values); writeBlockElementCsvFile(values, "preSorted", threadCount, deviceBlocks); //Do Sort and time it start = clock(); bitonic_sort(values); stop = clock(); time = getElapsedTime(start, stop); char* arrayState; char arrayStateChar; if (isSorted(values)){ printf("Is Sorted \n"); arrayState = "sorted"; arrayStateChar = 's'; } else{ printf("Not Sorted \n"); arrayState = "unsorted"; arrayStateChar = 'u'; } writeBlockElementCsvFile(values, arrayState, threadCount, deviceBlocks); //Allocate results values to pointers *threadCounts = threadCount; *allBlocks = deviceBlocks; *timeResults = time; *arrayStates = arrayStateChar; //Increment Result pointers threadCounts++; allBlocks++; timeResults++; arrayStates++; free(values); //Check again for user input executionCount--; } printf("Execution ended. Writing results to C:\BitonicSortArrayCSVFiles /n"); writeSortResultsToCsv(timeResults, "ParallelBitonicSort", arrayStates, threadCounts, allBlocks, fixedExecutionCount); getchar(); }
459cf184e95df767ced8e08b5fd047671dc08563.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda_runtime_api.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include <cuda_runtime_api.h> #include "writeToCSVFileHeader.h" #include "userInputHeader.h" void printArray(int *elements); int deviceBlocks; int threadsPerBlock; int elementsToSort; int threadCount; //Max times we cann run the process int executionCount; const int randMax = 10000; void createUnsortedArray(int* elements){ //Get size and cuda dimentions from user input for (int i = 0; i < elementsToSort; ++i){ elements[i] = rand() % randMax - rand() % 5; } } bool isSorted(int *elements){ bool sorted = true; for (int i = 0; i < (elementsToSort - 1); ++i){ if (elements[i] > elements[i + 1]){ sorted = false; } } return sorted; } double getElapsedTime(clock_t start, clock_t stop) { double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.3fs\n", elapsed); return elapsed; } int random_int() { return (int)rand() / (int)2048; } /* We get our indexes to swap buy xoring our thread index with the step. 
This essentially wraps the thread Id round the step value meaning the only values porduced that are bigger than the Thread Id will be those within the requied step length */ __global__ void stepskernel(int *dev_values, int step, int phaseLength) { unsigned int firstIndex, XoredSecondIndex; //Set it to the thread Id firstIndex = threadIdx.x + blockDim.x * blockIdx.x; XoredSecondIndex = firstIndex ^ step; //Threads i corrasponding to the desired bitonic element will be used for the swap if ((XoredSecondIndex)>firstIndex) { if ((firstIndex&phaseLength) == 0) { if (dev_values[firstIndex]>dev_values[XoredSecondIndex]) { int temp = dev_values[firstIndex]; dev_values[firstIndex] = dev_values[XoredSecondIndex]; dev_values[XoredSecondIndex] = temp; } } if ((firstIndex&phaseLength) != 0) { if (dev_values[firstIndex]<dev_values[XoredSecondIndex]) { int temp = dev_values[firstIndex]; dev_values[firstIndex] = dev_values[XoredSecondIndex]; dev_values[XoredSecondIndex] = temp; } } } } /* Main function call. Created array and calls stepskernel based of the size of the bitonic sequences and step. 
*/ void bitonic_sort(int *values) { int *dev_values; size_t size = elementsToSort* sizeof(int); //Allocate memory on the device then copy our host array to device pointer cudaMalloc((void**)&dev_values, size); cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice); dim3 blocks(deviceBlocks, 1); dim3 threads(threadsPerBlock, 1); int step, phaseLength; for (phaseLength = 2; phaseLength <= elementsToSort; phaseLength <<= 1) { for (step = phaseLength >> 1; step>0; step = step >> 1) { stepskernel << <blocks, threads >> >(dev_values, step, phaseLength); } } cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost); cudaFree(dev_values); } void preExecution(){ int values[7]; values[0] = 10; values[1] = 13; values[2] = 9; values[3] = 18; values[4] = 26; values[4] = 100; values[6] = 3; bitonic_sort(values); } int main(void) { executionCount = getMaxProcessCount(); int fixedExecutionCount = executionCount; preExecution(); bool runSort = true; runSort = runSortAgain(); //Pointers to store our results that we're writing to CSV files, allocate space entered buy the user int* threadCounts = (int*)malloc(executionCount*sizeof(int)); int* allBlocks = (int*)malloc(executionCount*sizeof(int));; double* timeResults = (double*)malloc(executionCount*sizeof(double));; char* arrayStates = (char*)malloc(executionCount*sizeof(char)); double time; clock_t start, stop; //Counter so we can assine values to the array in the execution loop while (runSort && executionCount != 0){ //Get thread, blocks and element count //Get total elements and suggested block thread configurations blockAndThreadCounts inputCountandSuggestedThreadBlockCount; inputCountandSuggestedThreadBlockCount = getElementCounts(); elementsToSort = inputCountandSuggestedThreadBlockCount.elementCount; //wirte possible thread and block configurations to text file printf("Writing suggested block thread configuration..."); writeSuggestedBlockThreadConfigToCsv(inputCountandSuggestedThreadBlockCount.threadCounts, 
inputCountandSuggestedThreadBlockCount.blockCounts, inputCountandSuggestedThreadBlockCount.combinationsCount ); printf("Done \n"); //elementsToSort = inputCountandSuggestedThreadBlockCount.elementCount; deviceBlocks = getBlockCount(); threadsPerBlock = getThreadCount(); threadCount = threadsPerBlock * deviceBlocks; //Malloc array, add values to it and write unsorted array to csv file int* values = (int*)malloc(elementsToSort*sizeof(int)); createUnsortedArray(values); writeBlockElementCsvFile(values, "preSorted", threadCount, deviceBlocks); //Do Sort and time it start = clock(); bitonic_sort(values); stop = clock(); time = getElapsedTime(start, stop); char* arrayState; char arrayStateChar; if (isSorted(values)){ printf("Is Sorted \n"); arrayState = "sorted"; arrayStateChar = 's'; } else{ printf("Not Sorted \n"); arrayState = "unsorted"; arrayStateChar = 'u'; } writeBlockElementCsvFile(values, arrayState, threadCount, deviceBlocks); //Allocate results values to pointers *threadCounts = threadCount; *allBlocks = deviceBlocks; *timeResults = time; *arrayStates = arrayStateChar; //Increment Result pointers threadCounts++; allBlocks++; timeResults++; arrayStates++; free(values); //Check again for user input executionCount--; } printf("Execution ended. Writing results to C:\BitonicSortArrayCSVFiles /n"); writeSortResultsToCsv(timeResults, "ParallelBitonicSort", arrayStates, threadCounts, allBlocks, fixedExecutionCount); getchar(); }
4baaf09facb14b0996c16c51187f8e022d606da7.hip
// !!! This is a file automatically generated by hipify!!! #include "GPUVolumetricShadingKernels.cuh" #include <device_launch_parameters.h> #include <sutil/vec_math.h> #include "../../Shaders/CppCommon/Half4.h" using namespace WaveFront; GPU_ONLY void VolumetricShadeDirect( PixelIndex a_PixelIndex, const uint3 a_ResolutionAndDepth, const WaveFront::VolumetricData* a_VolumetricDataBuffer, WaveFront::AtomicBuffer<WaveFront::ShadowRayData>* const a_ShadowRays, const AtomicBuffer<TriangleLight>* const a_Lights, unsigned int& a_Seed, const CDF* const a_CDF, hipSurfaceObject_t a_Output) { const unsigned int pixelDataIndex = PIXEL_DATA_INDEX(a_PixelIndex.m_X, a_PixelIndex.m_Y, a_ResolutionAndDepth.x); const auto& intersection = a_VolumetricDataBuffer[pixelDataIndex]; if (intersection.m_ExitIntersectionT > intersection.m_EntryIntersectionT) { //Volume ray marching settings (these need to be moved elsewhere or replaced with more sensible parameters) const int MAX_STEPS = 5; const float DENSITY_PER_CENTIMETER = intersection.m_Density; const float VOLUME_COLOR_R = 1.0f; const float VOLUME_COLOR_G = 1.0f; const float VOLUME_COLOR_B = 1.0f; //Sample volume float distance = intersection.m_ExitIntersectionT - intersection.m_EntryIntersectionT; float accumulatedDensity = 0.0f; //Calculate appropriate step size float stepSize = distance / MAX_STEPS; float3 prevSamplePosition = intersection.m_PositionEntry; float offset = RandomFloat(a_Seed) * stepSize; //This is used to offset each ray into the screen by a small amount, sampling different parts of the volume for (int i = 0; i < MAX_STEPS && accumulatedDensity < 1.0f && (float)i * stepSize < distance; i++) { float sampleT = (float)i * stepSize + offset; float3 samplePosition = intersection.m_PositionEntry + intersection.m_IncomingRayDirection * sampleT; float distanceSincePrevSample = length(samplePosition - prevSamplePosition); prevSamplePosition = samplePosition; //Pick a light from the CDF unsigned index; float pdf; 
a_CDF->Get(RandomFloat(a_Seed), index, pdf); auto& light = *a_Lights->GetData(index); //Pick random point on light const float u = RandomFloat(a_Seed); const float v = RandomFloat(a_Seed) * (1.f - u); float3 arm1 = light.p1 - light.p0; float3 arm2 = light.p2 - light.p0; float3 lightCenter = light.p0 + (arm1 * u) + (arm2 * v); //Direction from pixel to light float3 pixelToLightDir = lightCenter - samplePosition; //Light distance from pixel const float lDistance = length(pixelToLightDir); //Normalize pixelToLightDir /= lDistance; float sampledDensity = DENSITY_PER_CENTIMETER * distanceSincePrevSample; auto volumeColor = make_float3(VOLUME_COLOR_R, VOLUME_COLOR_G, VOLUME_COLOR_B) * 0.01f; ShadowRayData shadowRay( a_PixelIndex, samplePosition, pixelToLightDir, lDistance - 0.2f, volumeColor, LightChannel::VOLUMETRIC); a_ShadowRays->Add(&shadowRay); accumulatedDensity += sampledDensity; } //printf("Density: %d", accumulatedDensity); half4Ushort4 color{ make_float4(0.f, 0.f, 0.f, accumulatedDensity) }; surf2Dwrite<ushort4>( color.m_Ushort4, a_Output, a_PixelIndex.m_X * sizeof(ushort4), a_PixelIndex.m_Y, hipBoundaryModeTrap); //surf2DLayeredwrite<float4>( // make_float4(0.f, 0.f, 0.f, accumulatedDensity), // a_Output, // a_PixelIndex.m_X * sizeof(float4), // a_PixelIndex.m_Y, // static_cast<unsigned>(LightChannel::VOLUMETRIC), // hipBoundaryModeTrap); } return; }
4baaf09facb14b0996c16c51187f8e022d606da7.cu
#include "GPUVolumetricShadingKernels.cuh" #include <device_launch_parameters.h> #include <sutil/vec_math.h> #include "../../Shaders/CppCommon/Half4.h" using namespace WaveFront; GPU_ONLY void VolumetricShadeDirect( PixelIndex a_PixelIndex, const uint3 a_ResolutionAndDepth, const WaveFront::VolumetricData* a_VolumetricDataBuffer, WaveFront::AtomicBuffer<WaveFront::ShadowRayData>* const a_ShadowRays, const AtomicBuffer<TriangleLight>* const a_Lights, unsigned int& a_Seed, const CDF* const a_CDF, cudaSurfaceObject_t a_Output) { const unsigned int pixelDataIndex = PIXEL_DATA_INDEX(a_PixelIndex.m_X, a_PixelIndex.m_Y, a_ResolutionAndDepth.x); const auto& intersection = a_VolumetricDataBuffer[pixelDataIndex]; if (intersection.m_ExitIntersectionT > intersection.m_EntryIntersectionT) { //Volume ray marching settings (these need to be moved elsewhere or replaced with more sensible parameters) const int MAX_STEPS = 5; const float DENSITY_PER_CENTIMETER = intersection.m_Density; const float VOLUME_COLOR_R = 1.0f; const float VOLUME_COLOR_G = 1.0f; const float VOLUME_COLOR_B = 1.0f; //Sample volume float distance = intersection.m_ExitIntersectionT - intersection.m_EntryIntersectionT; float accumulatedDensity = 0.0f; //Calculate appropriate step size float stepSize = distance / MAX_STEPS; float3 prevSamplePosition = intersection.m_PositionEntry; float offset = RandomFloat(a_Seed) * stepSize; //This is used to offset each ray into the screen by a small amount, sampling different parts of the volume for (int i = 0; i < MAX_STEPS && accumulatedDensity < 1.0f && (float)i * stepSize < distance; i++) { float sampleT = (float)i * stepSize + offset; float3 samplePosition = intersection.m_PositionEntry + intersection.m_IncomingRayDirection * sampleT; float distanceSincePrevSample = length(samplePosition - prevSamplePosition); prevSamplePosition = samplePosition; //Pick a light from the CDF unsigned index; float pdf; a_CDF->Get(RandomFloat(a_Seed), index, pdf); auto& light = 
*a_Lights->GetData(index); //Pick random point on light const float u = RandomFloat(a_Seed); const float v = RandomFloat(a_Seed) * (1.f - u); float3 arm1 = light.p1 - light.p0; float3 arm2 = light.p2 - light.p0; float3 lightCenter = light.p0 + (arm1 * u) + (arm2 * v); //Direction from pixel to light float3 pixelToLightDir = lightCenter - samplePosition; //Light distance from pixel const float lDistance = length(pixelToLightDir); //Normalize pixelToLightDir /= lDistance; float sampledDensity = DENSITY_PER_CENTIMETER * distanceSincePrevSample; auto volumeColor = make_float3(VOLUME_COLOR_R, VOLUME_COLOR_G, VOLUME_COLOR_B) * 0.01f; ShadowRayData shadowRay( a_PixelIndex, samplePosition, pixelToLightDir, lDistance - 0.2f, volumeColor, LightChannel::VOLUMETRIC); a_ShadowRays->Add(&shadowRay); accumulatedDensity += sampledDensity; } //printf("Density: %d", accumulatedDensity); half4Ushort4 color{ make_float4(0.f, 0.f, 0.f, accumulatedDensity) }; surf2Dwrite<ushort4>( color.m_Ushort4, a_Output, a_PixelIndex.m_X * sizeof(ushort4), a_PixelIndex.m_Y, cudaBoundaryModeTrap); //surf2DLayeredwrite<float4>( // make_float4(0.f, 0.f, 0.f, accumulatedDensity), // a_Output, // a_PixelIndex.m_X * sizeof(float4), // a_PixelIndex.m_Y, // static_cast<unsigned>(LightChannel::VOLUMETRIC), // cudaBoundaryModeTrap); } return; }
be8b4b6654c01184e4c69a01555839e7b5293a73.hip
// !!! This is a file automatically generated by hipify!!! #include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include "ext_mpi_alltoall_native_gpu.h" int main (int argc, char **argv) { int *sendbuf_host, *recvbuf_host, *sendbuf_device, *recvbuf_device, msize, handle, i, *sendcounts, *recvcounts, *sdispls, *rdispls, num_cores, num_ports, num_active_ports, throttle; double start, stop, deltatmin_ref, deltatmax_ref, deltatmin_my, deltatmax_my, ttt; // Initialize the MPI environment MPI_Init (NULL, NULL); // Get the number of processes int world_size; MPI_Comm_size (MPI_COMM_WORLD, &world_size); // Get the rank of the process int world_rank; MPI_Comm_rank (MPI_COMM_WORLD, &world_rank); num_cores = 12; num_ports = world_size; num_active_ports = num_cores; throttle = world_size; srand (time (NULL) + world_rank); sendcounts = (int *) malloc (world_size * sizeof (int)); recvcounts = (int *) malloc (world_size * sizeof (int)); sdispls = (int *) malloc ((world_size + 1) * sizeof (int)); rdispls = (int *) malloc ((world_size + 1) * sizeof (int)); for (i = 0; i < world_size; i++) { sendcounts[i] = rand () % 10 + 1; } MPI_Alltoall (sendcounts, 1, MPI_INT, recvcounts, 1, MPI_INT, MPI_COMM_WORLD); sdispls[0] = 0; rdispls[0] = 0; for (i = 1; i < world_size + 1; i++) { sdispls[i] = sdispls[i - 1] + sendcounts[i - 1]; rdispls[i] = rdispls[i - 1] + recvcounts[i - 1]; } sendbuf_host = (int *) malloc (sdispls[world_size] * sizeof (int)); recvbuf_host = (int *) malloc (rdispls[world_size] * sizeof (int)); if (hipMalloc (&sendbuf_device, sdispls[world_size] * sizeof (int)) != 0) exit (2); if (hipMalloc (&recvbuf_device, rdispls[world_size] * sizeof (int)) != 0) exit (2); for (i = 0; i < sdispls[world_size]; i++) { sendbuf_host[i] = -22; } for (i = 0; i < rdispls[world_size]; i++) { recvbuf_host[i] = -11; } for (i = 0; i < world_size; i++) { sendbuf_host[sdispls[i]] = world_rank + i * world_size; } handle = EXT_MPI_Alltoallv_init_native_gpu (sendbuf_device, 
sendcounts, sdispls, MPI_INT, recvbuf_device, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD, num_cores, MPI_COMM_NULL, 1, num_active_ports, throttle); if (hipMemcpy (sendbuf_device, sendbuf_host, sdispls[world_size] * sizeof (int), hipMemcpyHostToDevice) != 0) exit (2); EXT_MPI_Alltoall_exec_native_gpu (handle); if (hipMemcpy (recvbuf_host, recvbuf_device, rdispls[world_size] * sizeof (int), hipMemcpyDeviceToHost) != 0) exit (2); EXT_MPI_Alltoall_done_native_gpu (handle); // printf ("aaaaaaaaa %d ", world_rank); for (i = 0; i < world_size; i++) { // printf ("%d ", recvbuf_host[rdispls[i]]); } // printf ("\n"); //MPI_Finalize(); //exit(1); free (rdispls); free (sdispls); free (recvcounts); free (sendbuf_host); free (recvbuf_host); hipFree (sendbuf_device); hipFree (recvbuf_device); msize = 1000; if (argc == 2) { msize = atoi (argv[1]); } if (hipMalloc (&sendbuf_device, msize * sizeof (char) * world_size) != 0) exit (2); if (hipMemset (sendbuf_device, 0, msize) != 0) exit (2); if (hipMalloc (&recvbuf_device, msize * sizeof (char) * world_size) != 0) exit (2); if (hipMemset (recvbuf_device, 0, msize) != 0) exit (2); hipDeviceSynchronize (); MPI_Barrier (MPI_COMM_WORLD); start = MPI_Wtime (); for (i = 0; i < 10000; i++) { MPI_Alltoall (sendbuf_device, msize, MPI_CHAR, recvbuf_device, msize, MPI_CHAR, MPI_COMM_WORLD); // MPI_Barrier(MPI_COMM_WORLD); } hipDeviceSynchronize (); stop = MPI_Wtime (); ttt = stop - start; MPI_Reduce (&ttt, (void *) &deltatmin_ref, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce (&ttt, (void *) &deltatmax_ref, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); for (i = 0; i < world_size; i++) { sendcounts[i] = msize; } handle = EXT_MPI_Alltoallv_init_native_gpu (sendbuf_device, sendcounts, NULL, MPI_CHAR, recvbuf_device, NULL, NULL, MPI_CHAR, MPI_COMM_WORLD, num_cores, MPI_COMM_NULL, 1, num_active_ports, throttle); hipDeviceSynchronize (); MPI_Barrier (MPI_COMM_WORLD); start = MPI_Wtime (); for (i = 0; i < 10000; i++) { 
EXT_MPI_Alltoall_exec_native_gpu (handle); // MPI_Barrier(MPI_COMM_WORLD); } hipDeviceSynchronize (); stop = MPI_Wtime (); ttt = stop - start; MPI_Reduce (&ttt, (void *) &deltatmin_my, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce (&ttt, (void *) &deltatmax_my, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); free (sendcounts); EXT_MPI_Alltoall_done_native_gpu (handle); hipDeviceSynchronize (); MPI_Barrier (MPI_COMM_WORLD); start = MPI_Wtime (); for (i = 0; i < 10000; i++) { MPI_Alltoall (sendbuf_device, msize, MPI_CHAR, recvbuf_device, msize, MPI_CHAR, MPI_COMM_WORLD); // MPI_Barrier(MPI_COMM_WORLD); } hipDeviceSynchronize (); stop = MPI_Wtime (); ttt = stop - start; MPI_Reduce (&ttt, (void *) &deltatmin_ref, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce (&ttt, (void *) &deltatmax_ref, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (world_rank == 0) { printf ("reference %d %d %d %d %d %d %e %e %e %e\n", world_size, num_cores, msize, num_ports, num_active_ports, throttle, deltatmin_ref, deltatmax_ref, deltatmin_my, deltatmax_my); } MPI_Finalize (); }
be8b4b6654c01184e4c69a01555839e7b5293a73.cu
#include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include "ext_mpi_alltoall_native_gpu.h" int main (int argc, char **argv) { int *sendbuf_host, *recvbuf_host, *sendbuf_device, *recvbuf_device, msize, handle, i, *sendcounts, *recvcounts, *sdispls, *rdispls, num_cores, num_ports, num_active_ports, throttle; double start, stop, deltatmin_ref, deltatmax_ref, deltatmin_my, deltatmax_my, ttt; // Initialize the MPI environment MPI_Init (NULL, NULL); // Get the number of processes int world_size; MPI_Comm_size (MPI_COMM_WORLD, &world_size); // Get the rank of the process int world_rank; MPI_Comm_rank (MPI_COMM_WORLD, &world_rank); num_cores = 12; num_ports = world_size; num_active_ports = num_cores; throttle = world_size; srand (time (NULL) + world_rank); sendcounts = (int *) malloc (world_size * sizeof (int)); recvcounts = (int *) malloc (world_size * sizeof (int)); sdispls = (int *) malloc ((world_size + 1) * sizeof (int)); rdispls = (int *) malloc ((world_size + 1) * sizeof (int)); for (i = 0; i < world_size; i++) { sendcounts[i] = rand () % 10 + 1; } MPI_Alltoall (sendcounts, 1, MPI_INT, recvcounts, 1, MPI_INT, MPI_COMM_WORLD); sdispls[0] = 0; rdispls[0] = 0; for (i = 1; i < world_size + 1; i++) { sdispls[i] = sdispls[i - 1] + sendcounts[i - 1]; rdispls[i] = rdispls[i - 1] + recvcounts[i - 1]; } sendbuf_host = (int *) malloc (sdispls[world_size] * sizeof (int)); recvbuf_host = (int *) malloc (rdispls[world_size] * sizeof (int)); if (cudaMalloc (&sendbuf_device, sdispls[world_size] * sizeof (int)) != 0) exit (2); if (cudaMalloc (&recvbuf_device, rdispls[world_size] * sizeof (int)) != 0) exit (2); for (i = 0; i < sdispls[world_size]; i++) { sendbuf_host[i] = -22; } for (i = 0; i < rdispls[world_size]; i++) { recvbuf_host[i] = -11; } for (i = 0; i < world_size; i++) { sendbuf_host[sdispls[i]] = world_rank + i * world_size; } handle = EXT_MPI_Alltoallv_init_native_gpu (sendbuf_device, sendcounts, sdispls, MPI_INT, recvbuf_device, recvcounts, 
rdispls, MPI_INT, MPI_COMM_WORLD, num_cores, MPI_COMM_NULL, 1, num_active_ports, throttle); if (cudaMemcpy (sendbuf_device, sendbuf_host, sdispls[world_size] * sizeof (int), cudaMemcpyHostToDevice) != 0) exit (2); EXT_MPI_Alltoall_exec_native_gpu (handle); if (cudaMemcpy (recvbuf_host, recvbuf_device, rdispls[world_size] * sizeof (int), cudaMemcpyDeviceToHost) != 0) exit (2); EXT_MPI_Alltoall_done_native_gpu (handle); // printf ("aaaaaaaaa %d ", world_rank); for (i = 0; i < world_size; i++) { // printf ("%d ", recvbuf_host[rdispls[i]]); } // printf ("\n"); //MPI_Finalize(); //exit(1); free (rdispls); free (sdispls); free (recvcounts); free (sendbuf_host); free (recvbuf_host); cudaFree (sendbuf_device); cudaFree (recvbuf_device); msize = 1000; if (argc == 2) { msize = atoi (argv[1]); } if (cudaMalloc (&sendbuf_device, msize * sizeof (char) * world_size) != 0) exit (2); if (cudaMemset (sendbuf_device, 0, msize) != 0) exit (2); if (cudaMalloc (&recvbuf_device, msize * sizeof (char) * world_size) != 0) exit (2); if (cudaMemset (recvbuf_device, 0, msize) != 0) exit (2); cudaDeviceSynchronize (); MPI_Barrier (MPI_COMM_WORLD); start = MPI_Wtime (); for (i = 0; i < 10000; i++) { MPI_Alltoall (sendbuf_device, msize, MPI_CHAR, recvbuf_device, msize, MPI_CHAR, MPI_COMM_WORLD); // MPI_Barrier(MPI_COMM_WORLD); } cudaDeviceSynchronize (); stop = MPI_Wtime (); ttt = stop - start; MPI_Reduce (&ttt, (void *) &deltatmin_ref, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce (&ttt, (void *) &deltatmax_ref, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); for (i = 0; i < world_size; i++) { sendcounts[i] = msize; } handle = EXT_MPI_Alltoallv_init_native_gpu (sendbuf_device, sendcounts, NULL, MPI_CHAR, recvbuf_device, NULL, NULL, MPI_CHAR, MPI_COMM_WORLD, num_cores, MPI_COMM_NULL, 1, num_active_ports, throttle); cudaDeviceSynchronize (); MPI_Barrier (MPI_COMM_WORLD); start = MPI_Wtime (); for (i = 0; i < 10000; i++) { EXT_MPI_Alltoall_exec_native_gpu (handle); // 
MPI_Barrier(MPI_COMM_WORLD); } cudaDeviceSynchronize (); stop = MPI_Wtime (); ttt = stop - start; MPI_Reduce (&ttt, (void *) &deltatmin_my, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce (&ttt, (void *) &deltatmax_my, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); free (sendcounts); EXT_MPI_Alltoall_done_native_gpu (handle); cudaDeviceSynchronize (); MPI_Barrier (MPI_COMM_WORLD); start = MPI_Wtime (); for (i = 0; i < 10000; i++) { MPI_Alltoall (sendbuf_device, msize, MPI_CHAR, recvbuf_device, msize, MPI_CHAR, MPI_COMM_WORLD); // MPI_Barrier(MPI_COMM_WORLD); } cudaDeviceSynchronize (); stop = MPI_Wtime (); ttt = stop - start; MPI_Reduce (&ttt, (void *) &deltatmin_ref, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce (&ttt, (void *) &deltatmax_ref, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (world_rank == 0) { printf ("reference %d %d %d %d %d %d %e %e %e %e\n", world_size, num_cores, msize, num_ports, num_active_ports, throttle, deltatmin_ref, deltatmax_ref, deltatmin_my, deltatmax_my); } MPI_Finalize (); }
2e06fd3cea84ab19824986a410c147e6c5ecafd9.hip
// !!! This is a file automatically generated by hipify!!! #include "GPU_Metric.h" #include "Hash_Utils.h" #include "GPU_Hash.h" #include <hip/hip_runtime.h> #include <string.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <sstream> #include <chrono> #include <string> std::ostream &operator<<(std::ostream &os, const pkt_tuple &tuple); std::ostream &operator<<(std::ostream &os, const flow_features &flow); std::ostream &operator<<(std::ostream &os, const features &bidirec_flow); __device__ void init_new_entry(struct features *bidirec_flow, struct pkt_tuple *tuple, struct pkt_info *info){ switch(tuple->proto){ case 6: bidirec_flow->hlen = UDP_HEADER_LEN; break; case 17: bidirec_flow->hlen = TCP_HEADER_LEN; break; } bidirec_flow->first_timestamp = info->timestamp; bidirec_flow->last_timestamp = info->timestamp; bidirec_flow->active_timestamp = info->timestamp; bidirec_flow->forward.timestamp = info->timestamp; bidirec_flow->forward.total_packets = 1; bidirec_flow->forward.total_volume = info->data_len; bidirec_flow->forward.min_pktl = info->data_len; bidirec_flow->forward.max_pktl = info->data_len; bidirec_flow->forward.sqsum_pktl = info->data_len * info->data_len; if(info->psh) ++bidirec_flow->forward.psh_cnt; if(info->urg) ++bidirec_flow->forward.urg_cnt; bidirec_flow->forward.total_hlen = bidirec_flow->hlen; } __device__ void update_entry(hash_entry *entry, struct pkt_tuple *tuple, struct pkt_info *info){ int diff = info->timestamp - entry->bidirec_flow.last_timestamp; if(diff > IDLE_THRESHOLD){ int cur_active = entry->bidirec_flow.last_timestamp - entry->bidirec_flow.active_timestamp; if(cur_active > 0){ if(cur_active < entry->bidirec_flow.min_active || entry->bidirec_flow.min_active == 0) entry->bidirec_flow.min_active = cur_active; if(cur_active > entry->bidirec_flow.max_active) entry->bidirec_flow.max_active = cur_active; entry->bidirec_flow.sum_active += cur_active; entry->bidirec_flow.sqsum_active += 
cur_active * cur_active; ++entry->bidirec_flow.active_times; } if(diff < entry->bidirec_flow.min_idle || entry->bidirec_flow.min_idle == 0) entry->bidirec_flow.min_idle = diff; if(diff > entry->bidirec_flow.max_idle) entry->bidirec_flow.max_idle = diff; entry->bidirec_flow.sum_idle += diff; entry->bidirec_flow.sqsum_idle += diff * diff; ++entry->bidirec_flow.idle_times; entry->bidirec_flow.active_timestamp = info->timestamp; } entry->bidirec_flow.last_timestamp = info->timestamp; struct flow_features *flow; if(tuple->src_ip == entry->tuple.src_ip) flow = &(entry->bidirec_flow.forward); else flow = &(entry->bidirec_flow.backward); flow->total_hlen += entry->bidirec_flow.hlen; if(info->psh) ++flow->psh_cnt; if(info->urg) ++flow->urg_cnt; if(flow->total_packets == 0){ flow->timestamp = info->timestamp; flow->total_packets = 1; flow->total_volume = info->data_len; flow->min_pktl = info->data_len; flow->max_pktl = info->data_len; flow->sqsum_pktl = info->data_len * info->data_len; return; } ++flow->total_packets; flow->total_volume += info->data_len; if(flow->min_pktl > info->data_len) flow->min_pktl = info->data_len; if(flow->max_pktl < info->data_len) flow->max_pktl = info->data_len; flow->sqsum_pktl += info->data_len * info->data_len; int interval = info->timestamp - flow->timestamp; flow->timestamp = info->timestamp; if(flow->total_packets == 2){ flow->min_iat = interval; flow->max_iat = interval; flow->sum_iat = interval; flow->sqsum_iat = interval * interval; } else{ if(flow->min_iat > interval) flow->min_iat = interval; if(flow->max_iat < interval) flow->max_iat = interval; flow->sum_iat += interval; flow->sqsum_iat += interval * interval; } } __global__ void gpu_extract_features(struct pkt_tuple_info *pkts, int *first_index, hash_entry *hash_table){ int index = blockDim.x * blockIdx.x + threadIdx.x; int cur_index = first_index[index]; int num = 0; hash_entry *entry; while(cur_index != -1){ entry = find_entry(hash_table, HASH_ENTRIES, pkts[cur_index].hash, 
pkts[cur_index].tuple); if(entry == NULL){ struct features bidirec_flow = {0}; init_new_entry(&bidirec_flow, &(pkts[cur_index].tuple), &(pkts[cur_index].info)); insert_entry(hash_table, HASH_ENTRIES, pkts[cur_index].hash, pkts[cur_index].tuple, bidirec_flow); } else update_entry(entry, &(pkts[cur_index].tuple), &(pkts[cur_index].info)); ++num; cur_index = pkts[cur_index].next_index; } //printf("thread%d process %d packets\n", index, num); //printf("thread%d process %dth packet\n", index, cur_index); } __global__ void gpu_update_hash_table(hash_entry *src_table, hash_entry *dst_table, uint64_t update_timestamp){ int index = blockDim.x * blockIdx.x + threadIdx.x; int N = HASH_ENTRIES / TotalThreads + 1; int start = index * N; int end = start + N; if(end > HASH_ENTRIES) end = HASH_ENTRIES; for(int i = start; i < end; ++i){ if(src_table[i].state != EMPTY_ENTRY && update_timestamp - src_table[i].bidirec_flow.last_timestamp < TIMEOUT_THRESHOLD){ insert_entry(dst_table, HASH_ENTRIES, src_table[i].hash, src_table[i].tuple, src_table[i].bidirec_flow); } } } GPU_Metric::GPU_Metric(): force_quit(false), array_num(0), max_array_num(0), has_data(false), copy_to_cpu(false), update_hash_table(false) { gpu_hash_table[0] = create_hash_table(HASH_ENTRIES); gpu_hash_table[1] = create_hash_table(HASH_ENTRIES); hash_table = gpu_hash_table[0]; cpu_hash_table = new hash_entry[HASH_ENTRIES]; cpu_first_index = new int[TotalThreads]; cpu_last_index = new int[TotalThreads]; memset(cpu_first_index, -1, TotalThreads * sizeof(int)); memset(cpu_last_index, -1, TotalThreads * sizeof(int)); hipMalloc(&gpu_pkts, ArraySize * sizeof(struct pkt_tuple_info)); hipMalloc(&gpu_first_index, TotalThreads * sizeof(int)); sockfd = socket(AF_INET, SOCK_DGRAM, 0); memset(&servaddr, 0, sizeof(servaddr)); servaddr.sin_family = AF_INET; servaddr.sin_port = htons(PORT); servaddr.sin_addr.s_addr = INADDR_ANY; } GPU_Metric::~GPU_Metric(){ free_hash_table(gpu_hash_table[0]); free_hash_table(gpu_hash_table[1]); delete 
[]cpu_hash_table; delete []cpu_first_index; delete []cpu_last_index; hipFree(gpu_pkts); hipFree(gpu_first_index); close(sockfd); } void GPU_Metric::process(){ struct pkt_tuple_info *pkts; while(!force_quit){ if(has_data){ q_m.lock(); pkts = q.front(); q.pop(); if(array_num > max_array_num) max_array_num = array_num; --array_num; if(array_num == 0) has_data = false; q_m.unlock(); process_pkts(pkts); } } } void GPU_Metric::process_pkts(struct pkt_tuple_info *pkts){ int index; for(int i = 0; i < ArraySize; ++i){ index = pkts[i].hash % TotalThreads; if(cpu_first_index[index] == -1){ cpu_first_index[index] = i; cpu_last_index[index] = i; } else{ pkts[cpu_last_index[index]].next_index = i; cpu_last_index[index] = i; } } hipDeviceSynchronize(); if(update_hash_table){ hash_entry *pre_hash_table = hash_table; if(hash_table == gpu_hash_table[0]){ hash_table = gpu_hash_table[1]; } else{ hash_table = gpu_hash_table[0]; } hipLaunchKernelGGL(( gpu_update_hash_table), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, pre_hash_table, hash_table, update_timestamp); hipDeviceSynchronize(); hipMemset(pre_hash_table, 0, HASH_ENTRIES * sizeof(hash_entry)); update_hash_table = false; } hipMemcpy(gpu_pkts, pkts, ArraySize * sizeof(struct pkt_tuple_info), hipMemcpyHostToDevice); hipMemcpy(gpu_first_index, cpu_first_index, TotalThreads * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( gpu_extract_features), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, gpu_pkts, gpu_first_index, hash_table); memset(cpu_first_index, -1, TotalThreads * sizeof(int)); memset(cpu_last_index, -1, TotalThreads * sizeof(int)); free(pkts); if(copy_to_cpu){ hipMemcpy(cpu_hash_table, hash_table, HASH_ENTRIES * sizeof(hash_entry), hipMemcpyDeviceToHost); copy_to_cpu = false; } } void GPU_Metric::clean_flow_stats(uint64_t timestamp){ update_timestamp = timestamp; update_hash_table = true; auto start = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now(); std::chrono::duration<double> 
diff; while(update_hash_table){ end = std::chrono::system_clock::now(); diff = end - start; if(diff.count() > 2){ std::cout << "do not need to update hash table" << std::endl; update_hash_table = false; return; } } end = std::chrono::system_clock::now(); diff = end - start; std::cout << "updated hash table, used " << diff.count() << "s!" << std::endl; } void GPU_Metric::gpu_output_stats(){ std::cout << "max_array_num = " << max_array_num << std::endl; copy_to_cpu = true; auto start = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now(); std::chrono::duration<double> diff; while(copy_to_cpu){ end = std::chrono::system_clock::now(); diff = end - start; if(diff.count() > 1){ std::cout << "no features to record" << std::endl; copy_to_cpu = false; return; } } std::cout << "copied hash_table to cpu_hash_table" << std::endl; //output_to_file(); output_through_udp(); } void GPU_Metric::output_to_file(){ std::ofstream ofs("gpu_out", std::ofstream::app); ofs << "Output begin!" << std::endl; auto start = std::chrono::system_clock::now(); for(int i = 0; i < HASH_ENTRIES; ++i){ if(cpu_hash_table[i].state != 0){ ofs << cpu_hash_table[i].tuple << " " << cpu_hash_table[i].bidirec_flow << std::endl; } } auto end = std::chrono::system_clock::now(); std::chrono::duration<double> diff = end - start; ofs << "Output end, used " << diff.count() << "s!" 
<< std::endl; ofs.close(); } void GPU_Metric::output_through_udp(){ //auto start = std::chrono::system_clock::now(); int index = 0; while(index < HASH_ENTRIES){ std::ostringstream oss; while(oss.tellp() < 1470 && index < HASH_ENTRIES){ if(cpu_hash_table[index].state != 0){ oss << cpu_hash_table[index].tuple << " " << cpu_hash_table[index].bidirec_flow << std::endl; } ++index; } std::string msg = oss.str(); sendto(sockfd, msg.c_str(), msg.size(), MSG_CONFIRM, (const struct sockaddr *)&servaddr, sizeof(servaddr)); } // auto end = std::chrono::system_clock::now(); // std::chrono::duration<double> diff = end - start; // std::string msg("Output end, used "); // msg += std::to_string(diff.count()); // msg += "\n"; // sendto(sockfd, msg.c_str(), msg.size(), MSG_CONFIRM, (const struct sockaddr *)&servaddr, sizeof(servaddr)); }
2e06fd3cea84ab19824986a410c147e6c5ecafd9.cu
#include "GPU_Metric.h" #include "Hash_Utils.h" #include "GPU_Hash.h" #include <cuda_runtime.h> #include <string.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <sstream> #include <chrono> #include <string> std::ostream &operator<<(std::ostream &os, const pkt_tuple &tuple); std::ostream &operator<<(std::ostream &os, const flow_features &flow); std::ostream &operator<<(std::ostream &os, const features &bidirec_flow); __device__ void init_new_entry(struct features *bidirec_flow, struct pkt_tuple *tuple, struct pkt_info *info){ switch(tuple->proto){ case 6: bidirec_flow->hlen = UDP_HEADER_LEN; break; case 17: bidirec_flow->hlen = TCP_HEADER_LEN; break; } bidirec_flow->first_timestamp = info->timestamp; bidirec_flow->last_timestamp = info->timestamp; bidirec_flow->active_timestamp = info->timestamp; bidirec_flow->forward.timestamp = info->timestamp; bidirec_flow->forward.total_packets = 1; bidirec_flow->forward.total_volume = info->data_len; bidirec_flow->forward.min_pktl = info->data_len; bidirec_flow->forward.max_pktl = info->data_len; bidirec_flow->forward.sqsum_pktl = info->data_len * info->data_len; if(info->psh) ++bidirec_flow->forward.psh_cnt; if(info->urg) ++bidirec_flow->forward.urg_cnt; bidirec_flow->forward.total_hlen = bidirec_flow->hlen; } __device__ void update_entry(hash_entry *entry, struct pkt_tuple *tuple, struct pkt_info *info){ int diff = info->timestamp - entry->bidirec_flow.last_timestamp; if(diff > IDLE_THRESHOLD){ int cur_active = entry->bidirec_flow.last_timestamp - entry->bidirec_flow.active_timestamp; if(cur_active > 0){ if(cur_active < entry->bidirec_flow.min_active || entry->bidirec_flow.min_active == 0) entry->bidirec_flow.min_active = cur_active; if(cur_active > entry->bidirec_flow.max_active) entry->bidirec_flow.max_active = cur_active; entry->bidirec_flow.sum_active += cur_active; entry->bidirec_flow.sqsum_active += cur_active * cur_active; ++entry->bidirec_flow.active_times; } 
if(diff < entry->bidirec_flow.min_idle || entry->bidirec_flow.min_idle == 0) entry->bidirec_flow.min_idle = diff; if(diff > entry->bidirec_flow.max_idle) entry->bidirec_flow.max_idle = diff; entry->bidirec_flow.sum_idle += diff; entry->bidirec_flow.sqsum_idle += diff * diff; ++entry->bidirec_flow.idle_times; entry->bidirec_flow.active_timestamp = info->timestamp; } entry->bidirec_flow.last_timestamp = info->timestamp; struct flow_features *flow; if(tuple->src_ip == entry->tuple.src_ip) flow = &(entry->bidirec_flow.forward); else flow = &(entry->bidirec_flow.backward); flow->total_hlen += entry->bidirec_flow.hlen; if(info->psh) ++flow->psh_cnt; if(info->urg) ++flow->urg_cnt; if(flow->total_packets == 0){ flow->timestamp = info->timestamp; flow->total_packets = 1; flow->total_volume = info->data_len; flow->min_pktl = info->data_len; flow->max_pktl = info->data_len; flow->sqsum_pktl = info->data_len * info->data_len; return; } ++flow->total_packets; flow->total_volume += info->data_len; if(flow->min_pktl > info->data_len) flow->min_pktl = info->data_len; if(flow->max_pktl < info->data_len) flow->max_pktl = info->data_len; flow->sqsum_pktl += info->data_len * info->data_len; int interval = info->timestamp - flow->timestamp; flow->timestamp = info->timestamp; if(flow->total_packets == 2){ flow->min_iat = interval; flow->max_iat = interval; flow->sum_iat = interval; flow->sqsum_iat = interval * interval; } else{ if(flow->min_iat > interval) flow->min_iat = interval; if(flow->max_iat < interval) flow->max_iat = interval; flow->sum_iat += interval; flow->sqsum_iat += interval * interval; } } __global__ void gpu_extract_features(struct pkt_tuple_info *pkts, int *first_index, hash_entry *hash_table){ int index = blockDim.x * blockIdx.x + threadIdx.x; int cur_index = first_index[index]; int num = 0; hash_entry *entry; while(cur_index != -1){ entry = find_entry(hash_table, HASH_ENTRIES, pkts[cur_index].hash, pkts[cur_index].tuple); if(entry == NULL){ struct features 
bidirec_flow = {0}; init_new_entry(&bidirec_flow, &(pkts[cur_index].tuple), &(pkts[cur_index].info)); insert_entry(hash_table, HASH_ENTRIES, pkts[cur_index].hash, pkts[cur_index].tuple, bidirec_flow); } else update_entry(entry, &(pkts[cur_index].tuple), &(pkts[cur_index].info)); ++num; cur_index = pkts[cur_index].next_index; } //printf("thread%d process %d packets\n", index, num); //printf("thread%d process %dth packet\n", index, cur_index); } __global__ void gpu_update_hash_table(hash_entry *src_table, hash_entry *dst_table, uint64_t update_timestamp){ int index = blockDim.x * blockIdx.x + threadIdx.x; int N = HASH_ENTRIES / TotalThreads + 1; int start = index * N; int end = start + N; if(end > HASH_ENTRIES) end = HASH_ENTRIES; for(int i = start; i < end; ++i){ if(src_table[i].state != EMPTY_ENTRY && update_timestamp - src_table[i].bidirec_flow.last_timestamp < TIMEOUT_THRESHOLD){ insert_entry(dst_table, HASH_ENTRIES, src_table[i].hash, src_table[i].tuple, src_table[i].bidirec_flow); } } } GPU_Metric::GPU_Metric(): force_quit(false), array_num(0), max_array_num(0), has_data(false), copy_to_cpu(false), update_hash_table(false) { gpu_hash_table[0] = create_hash_table(HASH_ENTRIES); gpu_hash_table[1] = create_hash_table(HASH_ENTRIES); hash_table = gpu_hash_table[0]; cpu_hash_table = new hash_entry[HASH_ENTRIES]; cpu_first_index = new int[TotalThreads]; cpu_last_index = new int[TotalThreads]; memset(cpu_first_index, -1, TotalThreads * sizeof(int)); memset(cpu_last_index, -1, TotalThreads * sizeof(int)); cudaMalloc(&gpu_pkts, ArraySize * sizeof(struct pkt_tuple_info)); cudaMalloc(&gpu_first_index, TotalThreads * sizeof(int)); sockfd = socket(AF_INET, SOCK_DGRAM, 0); memset(&servaddr, 0, sizeof(servaddr)); servaddr.sin_family = AF_INET; servaddr.sin_port = htons(PORT); servaddr.sin_addr.s_addr = INADDR_ANY; } GPU_Metric::~GPU_Metric(){ free_hash_table(gpu_hash_table[0]); free_hash_table(gpu_hash_table[1]); delete []cpu_hash_table; delete []cpu_first_index; delete 
[]cpu_last_index; cudaFree(gpu_pkts); cudaFree(gpu_first_index); close(sockfd); } void GPU_Metric::process(){ struct pkt_tuple_info *pkts; while(!force_quit){ if(has_data){ q_m.lock(); pkts = q.front(); q.pop(); if(array_num > max_array_num) max_array_num = array_num; --array_num; if(array_num == 0) has_data = false; q_m.unlock(); process_pkts(pkts); } } } void GPU_Metric::process_pkts(struct pkt_tuple_info *pkts){ int index; for(int i = 0; i < ArraySize; ++i){ index = pkts[i].hash % TotalThreads; if(cpu_first_index[index] == -1){ cpu_first_index[index] = i; cpu_last_index[index] = i; } else{ pkts[cpu_last_index[index]].next_index = i; cpu_last_index[index] = i; } } cudaDeviceSynchronize(); if(update_hash_table){ hash_entry *pre_hash_table = hash_table; if(hash_table == gpu_hash_table[0]){ hash_table = gpu_hash_table[1]; } else{ hash_table = gpu_hash_table[0]; } gpu_update_hash_table<<<numBlocks, threadsPerBlock>>>(pre_hash_table, hash_table, update_timestamp); cudaDeviceSynchronize(); cudaMemset(pre_hash_table, 0, HASH_ENTRIES * sizeof(hash_entry)); update_hash_table = false; } cudaMemcpy(gpu_pkts, pkts, ArraySize * sizeof(struct pkt_tuple_info), cudaMemcpyHostToDevice); cudaMemcpy(gpu_first_index, cpu_first_index, TotalThreads * sizeof(int), cudaMemcpyHostToDevice); gpu_extract_features<<<numBlocks, threadsPerBlock>>>(gpu_pkts, gpu_first_index, hash_table); memset(cpu_first_index, -1, TotalThreads * sizeof(int)); memset(cpu_last_index, -1, TotalThreads * sizeof(int)); free(pkts); if(copy_to_cpu){ cudaMemcpy(cpu_hash_table, hash_table, HASH_ENTRIES * sizeof(hash_entry), cudaMemcpyDeviceToHost); copy_to_cpu = false; } } void GPU_Metric::clean_flow_stats(uint64_t timestamp){ update_timestamp = timestamp; update_hash_table = true; auto start = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now(); std::chrono::duration<double> diff; while(update_hash_table){ end = std::chrono::system_clock::now(); diff = end - start; if(diff.count() > 2){ 
std::cout << "do not need to update hash table" << std::endl; update_hash_table = false; return; } } end = std::chrono::system_clock::now(); diff = end - start; std::cout << "updated hash table, used " << diff.count() << "s!" << std::endl; } void GPU_Metric::gpu_output_stats(){ std::cout << "max_array_num = " << max_array_num << std::endl; copy_to_cpu = true; auto start = std::chrono::system_clock::now(); auto end = std::chrono::system_clock::now(); std::chrono::duration<double> diff; while(copy_to_cpu){ end = std::chrono::system_clock::now(); diff = end - start; if(diff.count() > 1){ std::cout << "no features to record" << std::endl; copy_to_cpu = false; return; } } std::cout << "copied hash_table to cpu_hash_table" << std::endl; //output_to_file(); output_through_udp(); } void GPU_Metric::output_to_file(){ std::ofstream ofs("gpu_out", std::ofstream::app); ofs << "Output begin!" << std::endl; auto start = std::chrono::system_clock::now(); for(int i = 0; i < HASH_ENTRIES; ++i){ if(cpu_hash_table[i].state != 0){ ofs << cpu_hash_table[i].tuple << " " << cpu_hash_table[i].bidirec_flow << std::endl; } } auto end = std::chrono::system_clock::now(); std::chrono::duration<double> diff = end - start; ofs << "Output end, used " << diff.count() << "s!" 
<< std::endl; ofs.close(); } void GPU_Metric::output_through_udp(){ //auto start = std::chrono::system_clock::now(); int index = 0; while(index < HASH_ENTRIES){ std::ostringstream oss; while(oss.tellp() < 1470 && index < HASH_ENTRIES){ if(cpu_hash_table[index].state != 0){ oss << cpu_hash_table[index].tuple << " " << cpu_hash_table[index].bidirec_flow << std::endl; } ++index; } std::string msg = oss.str(); sendto(sockfd, msg.c_str(), msg.size(), MSG_CONFIRM, (const struct sockaddr *)&servaddr, sizeof(servaddr)); } // auto end = std::chrono::system_clock::now(); // std::chrono::duration<double> diff = end - start; // std::string msg("Output end, used "); // msg += std::to_string(diff.count()); // msg += "\n"; // sendto(sockfd, msg.c_str(), msg.size(), MSG_CONFIRM, (const struct sockaddr *)&servaddr, sizeof(servaddr)); }
0decfd5d10b128b82c9ed3c0e2a16e2e2a5d124a.hip
// !!! This is a file automatically generated by hipify!!! #include <cudf/detail/gather.cuh> #include <cudf/detail/gather.hpp> #include <cudf/types.hpp> #include <cudf/column/column_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf/table/table_view.hpp> #include <cudf/table/table.hpp> #include <cudf/copying.hpp> #include <utilities/legacy/error_utils.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/count.h> #include <memory> namespace cudf { namespace experimental { namespace detail { struct dispatch_map_type { template <typename map_type, std::enable_if_t<std::is_integral<map_type>::value and not std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr> std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map, size_type num_destination_rows, bool check_bounds, bool ignore_out_of_bounds, bool allow_negative_indices = false, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { std::unique_ptr<table> destination_table; if (check_bounds) { cudf::size_type begin = (allow_negative_indices) ? 
-source_table.num_rows() : 0; CUDF_EXPECTS( num_destination_rows == thrust::count_if( rmm::exec_policy()->on(0), gather_map.begin<map_type>(), gather_map.end<map_type>(), bounds_checker<map_type>{begin, source_table.num_rows()}), "Index out of bounds."); } if (allow_negative_indices) { destination_table = gather(source_table, thrust::make_transform_iterator( gather_map.begin<map_type>(), index_converter<map_type>{source_table.num_rows()}), thrust::make_transform_iterator( gather_map.end<map_type>(), index_converter<map_type>{source_table.num_rows()}), check_bounds, ignore_out_of_bounds, allow_negative_indices, mr, stream ); } else { destination_table = gather(source_table, gather_map.begin<map_type>(), gather_map.end<map_type>(), check_bounds, ignore_out_of_bounds, allow_negative_indices, mr, stream ); } return destination_table; } template <typename map_type, std::enable_if_t<not std::is_integral<map_type>::value or std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr> std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map, size_type num_destination_rows, bool check_bounds, bool ignore_out_of_bounds, bool allow_negative_indices = false, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { CUDF_FAIL("Gather map must be an integral type."); } }; std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map, bool check_bounds, bool ignore_out_of_bounds, bool allow_negative_indices, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls"); std::unique_ptr<table> destination_table = cudf::experimental::type_dispatcher(gather_map.type(), dispatch_map_type{}, source_table, gather_map, gather_map.size(), check_bounds, ignore_out_of_bounds, allow_negative_indices, mr, stream); return destination_table; } } // namespace detail std::unique_ptr<table> gather(table_view 
const& source_table, column_view const& gather_map, bool check_bounds, rmm::mr::device_memory_resource* mr) { return detail::gather(source_table, gather_map, check_bounds, false, true, mr); } } // namespace exp } // namespace cudf
0decfd5d10b128b82c9ed3c0e2a16e2e2a5d124a.cu
#include <cudf/detail/gather.cuh> #include <cudf/detail/gather.hpp> #include <cudf/types.hpp> #include <cudf/column/column_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf/table/table_view.hpp> #include <cudf/table/table.hpp> #include <cudf/copying.hpp> #include <utilities/legacy/error_utils.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/count.h> #include <memory> namespace cudf { namespace experimental { namespace detail { struct dispatch_map_type { template <typename map_type, std::enable_if_t<std::is_integral<map_type>::value and not std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr> std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map, size_type num_destination_rows, bool check_bounds, bool ignore_out_of_bounds, bool allow_negative_indices = false, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { std::unique_ptr<table> destination_table; if (check_bounds) { cudf::size_type begin = (allow_negative_indices) ? 
-source_table.num_rows() : 0; CUDF_EXPECTS( num_destination_rows == thrust::count_if( rmm::exec_policy()->on(0), gather_map.begin<map_type>(), gather_map.end<map_type>(), bounds_checker<map_type>{begin, source_table.num_rows()}), "Index out of bounds."); } if (allow_negative_indices) { destination_table = gather(source_table, thrust::make_transform_iterator( gather_map.begin<map_type>(), index_converter<map_type>{source_table.num_rows()}), thrust::make_transform_iterator( gather_map.end<map_type>(), index_converter<map_type>{source_table.num_rows()}), check_bounds, ignore_out_of_bounds, allow_negative_indices, mr, stream ); } else { destination_table = gather(source_table, gather_map.begin<map_type>(), gather_map.end<map_type>(), check_bounds, ignore_out_of_bounds, allow_negative_indices, mr, stream ); } return destination_table; } template <typename map_type, std::enable_if_t<not std::is_integral<map_type>::value or std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr> std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map, size_type num_destination_rows, bool check_bounds, bool ignore_out_of_bounds, bool allow_negative_indices = false, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { CUDF_FAIL("Gather map must be an integral type."); } }; std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map, bool check_bounds, bool ignore_out_of_bounds, bool allow_negative_indices, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls"); std::unique_ptr<table> destination_table = cudf::experimental::type_dispatcher(gather_map.type(), dispatch_map_type{}, source_table, gather_map, gather_map.size(), check_bounds, ignore_out_of_bounds, allow_negative_indices, mr, stream); return destination_table; } } // namespace detail std::unique_ptr<table> gather(table_view 
const& source_table, column_view const& gather_map, bool check_bounds, rmm::mr::device_memory_resource* mr) { return detail::gather(source_table, gather_map, check_bounds, false, true, mr); } } // namespace exp } // namespace cudf
216d63f89735a1d913241341971efbd5c6e75cc6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <rocblas.h> #include <hip/hip_runtime.h> #include <limits> //#include "cudamat_kernels.cuh" #include "cudamat_a.cuh" #include "cudamat_kernels.cu" //#define AUTO_CUDA_SYNC #ifdef AUTO_CUDA_SYNC #define CUDA_THREAD_SYNC() hipDeviceSynchronize(); #else #define CUDA_THREAD_SYNC() #endif #ifdef _MSC_VER #define DLLEXP __declspec(dllexport) #else #define DLLEXP #endif extern "C" { typedef unsigned char ubyte; typedef double doubl; typedef unsigned int unsig; inline int dtype_size(int dtype) { if (dtype == 0) return sizeof(float); if (dtype == 1) return sizeof(double); if (dtype == 2) return sizeof(unsigned char); if (dtype == 3) return sizeof(unsigned int); return -1; } /* ------------------------------ CUBLAS init/shutdown ------------------------------ */ inline bool check_cublas_error() { cublasStatus status = hipblasGetError(); return status != HIPBLAS_STATUS_SUCCESS; } inline bool checkCUDAError(hipError_t err = hipSuccess) { if (err == hipSuccess) err = hipGetLastError(); if (err != hipSuccess) printf("%s\n", hipGetErrorString(err)); return hipSuccess != err; } DLLEXP extern const char* get_last_cuda_error() { hipError_t err = hipGetLastError(); return hipGetErrorString( err); } DLLEXP extern int cublas_init() { hipblasInit(); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } DLLEXP extern int cublas_shutdown() { hipblasShutdown(); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } DLLEXP extern void cuda_device_reset() { checkCUDAError(hipDeviceReset()); } DLLEXP extern int cuda_get_device(int* device_id) { hipError_t error = hipGetDevice(device_id); return error? CUDA_ERROR : 0; } DLLEXP extern int cuda_get_device_count() { int count = 0; hipGetDeviceCount(&count); return count; } DLLEXP extern int cuda_get_device_prop(hipDeviceProp_t* prop, int device) { hipError_t error = hipGetDeviceProperties(prop,device); return error ? 
CUDA_ERROR : 0; } DLLEXP extern size_t cuda_memory_available() { // force device to be ready for cuMemGetInfo void* ptr = 0; hipMalloc(&ptr,128); hipFree(ptr); size_t free = 0, total = 0; hipError_t err = cuMemGetInfo(&free,&total); return free; } DLLEXP extern size_t cuda_memory_total() { // force device to be ready for cuMemGetInfo void* ptr = 0; hipMalloc(&ptr,128); hipFree(ptr); size_t free = 0, total = 0; hipError_t err = cuMemGetInfo(&free,&total); return total; } DLLEXP extern int cuda_set_device(int deviceId) { hipError_t error = hipSetDevice(deviceId); return checkCUDAError(error) ? CUDA_ERROR : 0; } DLLEXP extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) { unsigned int * host_mults; host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int)); FILE * pFile; pFile = fopen (cudamatpath,"r"); for (int i = 0; i < NUM_RND_STREAMS; i++) { fscanf (pFile, "%u", &host_mults[i]); } fclose (pFile); hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults); hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words); hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1); //hipMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int)); //hipMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long)); //hipMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Utility routines ------------------------------ */ DLLEXP extern int get_leading_dimension(cudamat* mat) { return mat->is_trans ? 
mat->size[1] : mat->size[0]; } DLLEXP extern int get_nonleading_dimension(cudamat* mat) { return mat->is_trans ? mat->size[0] : mat->size[1]; } DLLEXP extern void set_transpose(cudamat* mat, int is_trans) { mat->is_trans = is_trans; } inline char get_transpose_char(cudamat* mat) { return mat->is_trans ? 't' : 'n'; } DLLEXP extern void cuda_sync_threads() { hipDeviceSynchronize(); } /* ------------------------------ Allocating/moving data ------------------------------ */ DLLEXP extern int allocate_device_memory(cudamat* mat) { int len = mat->size[0]*mat->size[1]; cublasStatus stat; if (dtype_size(mat->dtype) <= 0) return ERROR_DTYPE_UNSUPPORTED; stat = hipblasAlloc(len, dtype_size(mat->dtype), &mat->data_device); if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) return CUBLAS_ERROR; mat->on_device = 1; return 0; } DLLEXP extern int copy_to_host(cudamat* mat) { int len = mat->size[0]*mat->size[1]; if (mat->on_device) { hipblasGetVector(len, dtype_size(mat->dtype), mat->data_device, 1, mat->data_host, 1); if (check_cublas_error()) return CUBLAS_ERROR; } else return ERROR_NOT_ON_DEVICE; return 0; } DLLEXP extern int copy_to_device(cudamat* mat) { int len = mat->size[0]*mat->size[1]; int err_code = 0; //if (!mat->owns_data) // return VIEW_ERROR; if (!mat->on_device) { err_code = allocate_device_memory(mat); if (err_code) return err_code; } hipblasSetVector(len, dtype_size(mat->dtype), mat->data_host, 1, mat->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; return 0; } DLLEXP extern int copy_on_device(cudamat* mat1, cudamat* mat2) { int len = mat1->size[0]*mat1->size[1]; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype) return ERROR_MISMATCHED_DTYPE; //hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1); hipMemcpy(mat2->data_device,mat1->data_device,len*dtype_size(mat1->dtype),hipMemcpyDeviceToDevice); if (check_cublas_error()) return 
CUBLAS_ERROR; else return 0; } DLLEXP extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { unsigned int height = source->size[0]; unsigned int width = source->size[1]; if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); if (source->dtype == 0) hipLaunchKernelGGL(( kGetRowSlice<float>), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (float*)source->data_device, (float*)target->data_device, start, end, width, height); if (source->dtype == 1) hipLaunchKernelGGL(( kGetRowSlice<doubl>), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (doubl*)source->data_device, (doubl*)target->data_device, start, end, width, height); if (source->dtype == 2) hipLaunchKernelGGL(( kGetRowSlice<ubyte>), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (ubyte*)source->data_device, (ubyte*)target->data_device, start, end, width, height); if (source->dtype == 3) hipLaunchKernelGGL(( kGetRowSlice<unsig>), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (unsig*)source->data_device, (unsig*)target->data_device, start, end, width, height); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { unsigned int height = target->size[0]; unsigned int width = target->size[1]; if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); if (source->dtype == 0) hipLaunchKernelGGL(( kSetRowSlice<float>), 
dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (float*)source->data_device, (float*)target->data_device, start, end, width, height); if (source->dtype == 1) hipLaunchKernelGGL(( kSetRowSlice<doubl>), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (doubl*)source->data_device, (doubl*)target->data_device, start, end, width, height); if (source->dtype == 2) hipLaunchKernelGGL(( kSetRowSlice<ubyte>), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (ubyte*)source->data_device, (ubyte*)target->data_device, start, end, width, height); if (source->dtype == 3) hipLaunchKernelGGL(( kSetRowSlice<unsig>), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, (unsig*)source->data_device, (unsig*)target->data_device, start, end, width, height); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int copy_transpose(cudamat* source, cudamat* target) { unsigned int height = source->size[0]; unsigned int width = source->size[1]; if (source->size[0] != target->size[1] || source->size[1] != target->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; // setup execution parameters unsigned int grid_x = height / COPY_BLOCK_SIZE; if (height % COPY_BLOCK_SIZE) grid_x++; unsigned int grid_y = width / COPY_BLOCK_SIZE; if (width % COPY_BLOCK_SIZE) grid_y++; dim3 grid(grid_x, grid_y, 1); dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1); if (source->dtype == 0) hipLaunchKernelGGL(( kTranspose<float>), dim3(grid), dim3(threads) , 0, 0, (float*)target->data_device, (float*)source->data_device, height, width); if (source->dtype == 1) hipLaunchKernelGGL(( kTranspose<doubl>), dim3(grid), dim3(threads) , 0, 0, (doubl*)target->data_device, (doubl*)source->data_device, height, width); if (source->dtype == 2) hipLaunchKernelGGL(( kTranspose<ubyte>), dim3(grid), dim3(threads) , 0, 0, (ubyte*)target->data_device, (ubyte*)source->data_device, height, width); if (source->dtype == 3) 
hipLaunchKernelGGL(( kTranspose<unsig>), dim3(grid), dim3(threads) , 0, 0, (unsig*)target->data_device, (unsig*)source->data_device, height, width); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int free_device_memory(cudamat* mat) { if (mat->owns_data && mat->on_device) { cublasStatus stat; stat = hipblasFree(mat->data_device); mat->on_device = 0; if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) return CUBLAS_ERROR; } return 0; } DLLEXP extern int reshape(cudamat* mat, unsigned int m, unsigned int n) { if (mat->size[0] * mat->size[1] != m * n) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->size[0] = m; mat->size[1] = n; return 0; } DLLEXP extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; int num_rows = source->size[0]; target->data_host = 0; target->data_device = (unsigned char*)source->data_device + first_col * num_rows * dtype_size(source->dtype); target->on_device = 1; target->on_host = 0; target->size[0] = source->size[0]; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 0; target->dtype = source->dtype; return 0; } DLLEXP extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) { // source must be a vector if (source->size[0] > 1 && source->size[1] > 1) return ERROR_GENERIC; if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (first_ind >= last_ind) return ERROR_INCOMPATIBLE_DIMENSIONS; unsigned int num_rows = source->size[0]; target->data_host = 0; target->data_device = (unsigned char*)source->data_device + first_ind * num_rows * dtype_size(source->dtype); target->on_device = 1; target->on_host = 0; target->is_trans = 0; target->owns_data = 0; 
target->dtype = source->dtype; if (source->size[0] > 1) { if (last_ind > source->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = last_ind - first_ind; target->size[1] = 1; } else { if (last_ind > source->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = 1; target->size[1] = last_ind - first_ind; } return 0; } /* ------------------------------ Initialization routines ------------------------------ */ DLLEXP extern void init_from_array(cudamat* mat, void* data, int m, int n, int dtype) { mat->data_host = data; mat->dtype = dtype; mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 1; mat->is_trans = 0; mat->owns_data = 1; } DLLEXP extern int init_empty(cudamat* mat, int m, int n, int dtype) { mat->dtype = dtype; mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 0; mat->is_trans = 0; mat->owns_data = 1; return allocate_device_memory(mat); } /* ------------------------------ Random number generation ------------------------------ */ DLLEXP extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (mat->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, (float*)mat->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (mat->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, (float*)mat->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Algebraic 
operations ------------------------------ */
/* diff_cols: differences of adjacent columns of mat written into target,
   which must be h x (w-1) with mat's dtype (exact orientation is defined
   by kDiffCols).  Dispatches on dtype: 0=float, 1=doubl, 2=ubyte, 3=unsig. */
DLLEXP extern int diff_cols(cudamat* mat, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]+1) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kDiffCols<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kDiffCols<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kDiffCols<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kDiffCols<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; }
/* diff_rows: not implemented. */
DLLEXP extern int diff_rows(cudamat* mat, cudamat* target) { return ERROR_UNSUPPORTED; // TODO
}
/* add_col_vec: target = mat with the h x 1 vector vec added to every column
   (kAddColVector).  Shapes and dtypes of all three operands must agree.
   NOTE(review): target->on_device is not checked here, unlike greater_than
   below -- confirm callers guarantee it. */
DLLEXP extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kAddColVector<float>),
dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kAddColVector<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kAddColVector<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kAddColVector<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; }
/* sub_col_vec: target = mat with vec subtracted from every column
   (kSubColVector); same shape/dtype contract as add_col_vec. */
DLLEXP extern int sub_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kSubColVector<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kSubColVector<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kSubColVector<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
(ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kSubColVector<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; }
/* add_col_mult: target = mat + mult * vec broadcast over columns
   (kAddColMult); mult is narrowed to the element type before launch. */
DLLEXP extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, double mult) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kAddColMult<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, (float)mult, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kAddColMult<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, (doubl)mult, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kAddColMult<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, (ubyte)mult, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kAddColMult<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, (unsig)mult, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* add_row_vec: target = mat with the 1 x w vector vec added to every row
   (kAddRowVector). */
DLLEXP extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int
h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kAddRowVector<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kAddRowVector<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kAddRowVector<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kAddRowVector<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* sub_row_vec: target = mat with vec subtracted from every row
   (kSubRowVector); same contract as add_row_vec. */
DLLEXP extern int sub_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kSubRowVector<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0,
0, (float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kSubRowVector<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kSubRowVector<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kSubRowVector<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* mult_by_col_vec: target = mat scaled elementwise by vec broadcast over
   columns (kMultByColVector). */
DLLEXP extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kMultByColVector<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kMultByColVector<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kMultByColVector<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)vec->data_device,
(ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kMultByColVector<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* mult_by_row_vec: target = mat scaled elementwise by vec broadcast over
   rows (kMultByRowVector). */
DLLEXP extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kMultByRowVector<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kMultByRowVector<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kMultByRowVector<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kMultByRowVector<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* clip_norm: per-column rescaling driven by vec and the eps threshold via
   kClipNorm.  NOTE(review): exact clipping rule lives in kClipNorm; for
   ubyte/unsig the (T)eps narrowing truncates -- confirm intended. */
DLLEXP extern int clip_norm(cudamat* mat, cudamat* vec, double eps, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return
ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kClipNorm<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)vec->data_device, (float)eps, (float*)target->data_device, w, h); if (mat->dtype == 1)hipLaunchKernelGGL(( kClipNorm<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)vec->data_device, (doubl)eps, (doubl*)target->data_device, w, h); if (mat->dtype == 2)hipLaunchKernelGGL(( kClipNorm<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte)eps, (ubyte*)target->data_device, w, h); if (mat->dtype == 3)hipLaunchKernelGGL(( kClipNorm<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig*)vec->data_device, (unsig)eps, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* less_than: elementwise target = (mat1 < mat2) via kLessThan<In,Out>.
   mat1/mat2 must share dtype; the result dtype may be float (0), ubyte (2)
   or unsig (3) -- doubl targets are rejected.  NOTE(review):
   target->on_device is not checked here, unlike greater_than. */
DLLEXP extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype) return ERROR_MISMATCHED_DTYPE; if (target->dtype == 0) { if (mat1->dtype == 0) hipLaunchKernelGGL(( kLessThan<float,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
(float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) hipLaunchKernelGGL(( kLessThan<doubl,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 2) hipLaunchKernelGGL(( kLessThan<ubyte,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 3) hipLaunchKernelGGL(( kLessThan<unsig,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat1->dtype == 0) hipLaunchKernelGGL(( kLessThan<float,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat1->data_device, (float*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 1) hipLaunchKernelGGL(( kLessThan<doubl,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 2) hipLaunchKernelGGL(( kLessThan<ubyte,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) hipLaunchKernelGGL(( kLessThan<unsig,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat1->dtype == 0) hipLaunchKernelGGL(( kLessThan<float,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat1->data_device, (float*)mat2->data_device, (unsig*)target->data_device, len); if
(mat1->dtype == 1) hipLaunchKernelGGL(( kLessThan<doubl,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 2) hipLaunchKernelGGL(( kLessThan<ubyte,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 3) hipLaunchKernelGGL(( kLessThan<unsig,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* less_than_scalar: elementwise target = (mat < val), with val narrowed to
   mat's element type before comparison (so e.g. ubyte comparisons truncate
   val).  Same target-dtype support as less_than. */
DLLEXP extern int less_than_scalar(cudamat* mat, double val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (target->dtype == 0) { if (mat->dtype == 0) hipLaunchKernelGGL(( kLessThanScalar<float,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)val, (float*)target->data_device, len); if (mat->dtype == 1) hipLaunchKernelGGL(( kLessThanScalar<doubl,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)val, (float*)target->data_device, len); if (mat->dtype == 2) hipLaunchKernelGGL(( kLessThanScalar<ubyte,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)val, (float*)target->data_device, len); if (mat->dtype == 3) hipLaunchKernelGGL(( kLessThanScalar<unsig,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
(unsig*)mat->data_device, (unsig)val, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat->dtype == 0) hipLaunchKernelGGL(( kLessThanScalar<float,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)val, (ubyte*)target->data_device, len); if (mat->dtype == 1) hipLaunchKernelGGL(( kLessThanScalar<doubl,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)val, (ubyte*)target->data_device, len); if (mat->dtype == 2) hipLaunchKernelGGL(( kLessThanScalar<ubyte,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)val, (ubyte*)target->data_device, len); if (mat->dtype == 3) hipLaunchKernelGGL(( kLessThanScalar<unsig,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)val, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat->dtype == 0) hipLaunchKernelGGL(( kLessThanScalar<float,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)val, (unsig*)target->data_device, len); if (mat->dtype == 1) hipLaunchKernelGGL(( kLessThanScalar<doubl,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)val, (unsig*)target->data_device, len); if (mat->dtype == 2) hipLaunchKernelGGL(( kLessThanScalar<ubyte,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)val, (unsig*)target->data_device, len); if (mat->dtype == 3) hipLaunchKernelGGL(( kLessThanScalar<unsig,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)val, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } 
/* greater_than: elementwise target = (mat1 > mat2) via kGreaterThan<In,Out>.
   mat1/mat2 must share dtype; the result dtype may be float (0), ubyte (2)
   or unsig (3).  Unlike less_than above, this one also verifies that
   target is device-resident. */
DLLEXP extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype) return ERROR_MISMATCHED_DTYPE; if (target->dtype == 0) { if (mat1->dtype == 0) hipLaunchKernelGGL(( kGreaterThan<float,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) hipLaunchKernelGGL(( kGreaterThan<doubl,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 2) hipLaunchKernelGGL(( kGreaterThan<ubyte,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 3) hipLaunchKernelGGL(( kGreaterThan<unsig,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat1->dtype == 0) hipLaunchKernelGGL(( kGreaterThan<float,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat1->data_device, (float*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 1) hipLaunchKernelGGL(( kGreaterThan<doubl,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 2)
hipLaunchKernelGGL(( kGreaterThan<ubyte,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) hipLaunchKernelGGL(( kGreaterThan<unsig,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat1->dtype == 0) hipLaunchKernelGGL(( kGreaterThan<float,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat1->data_device, (float*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 1) hipLaunchKernelGGL(( kGreaterThan<doubl,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 2) hipLaunchKernelGGL(( kGreaterThan<ubyte,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 3) hipLaunchKernelGGL(( kGreaterThan<unsig,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* greater_than_scalar: elementwise target = (mat > val), val narrowed to
   mat's element type; same target-dtype support as greater_than. */
DLLEXP extern int greater_than_scalar(cudamat* mat, double val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (target->dtype == 0) { if (mat->dtype == 0) hipLaunchKernelGGL(( kGreaterThanScalar<float,float>),
dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)val, (float*)target->data_device, len); if (mat->dtype == 1) hipLaunchKernelGGL(( kGreaterThanScalar<doubl,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)val, (float*)target->data_device, len); if (mat->dtype == 2) hipLaunchKernelGGL(( kGreaterThanScalar<ubyte,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)val, (float*)target->data_device, len); if (mat->dtype == 3) hipLaunchKernelGGL(( kGreaterThanScalar<unsig,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)val, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat->dtype == 0) hipLaunchKernelGGL(( kGreaterThanScalar<float,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)val, (ubyte*)target->data_device, len); if (mat->dtype == 1) hipLaunchKernelGGL(( kGreaterThanScalar<doubl,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)val, (ubyte*)target->data_device, len); if (mat->dtype == 2) hipLaunchKernelGGL(( kGreaterThanScalar<ubyte,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)val, (ubyte*)target->data_device, len); if (mat->dtype == 3) hipLaunchKernelGGL(( kGreaterThanScalar<unsig,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)val, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat->dtype == 0) hipLaunchKernelGGL(( kGreaterThanScalar<float,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)val, (unsig*)target->data_device, len); if (mat->dtype == 1)
hipLaunchKernelGGL(( kGreaterThanScalar<doubl,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)val, (unsig*)target->data_device, len); if (mat->dtype == 2) hipLaunchKernelGGL(( kGreaterThanScalar<ubyte,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)val, (unsig*)target->data_device, len); if (mat->dtype == 3) hipLaunchKernelGGL(( kGreaterThanScalar<unsig,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)val, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* maximum: elementwise target = max(mat1, mat2); all three operands must
   share shape and dtype (kMaximum<T>).  Continues onto the next lines. */
DLLEXP extern int maximum(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype || mat1->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat1->dtype == 0)hipLaunchKernelGGL(( kMaximum<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1)hipLaunchKernelGGL(( kMaximum<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len); if (mat1->dtype == 2)hipLaunchKernelGGL(( kMaximum<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 
3)hipLaunchKernelGGL(( kMaximum<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int maximum_scalar(cudamat* mat, double val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kMaximumScalar<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)val, (float*)target->data_device, len); if (mat->dtype == 1)hipLaunchKernelGGL(( kMaximumScalar<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)val, (doubl*)target->data_device, len); if (mat->dtype == 2)hipLaunchKernelGGL(( kMaximumScalar<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)val, (ubyte*)target->data_device, len); if (mat->dtype == 3)hipLaunchKernelGGL(( kMaximumScalar<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)val, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int max_by_axis(cudamat* mat, cudamat* target, int axis) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) 
return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype == 0)hipLaunchKernelGGL(( kMaxColumnwise<float>), dim3(w),dim3(32), 0, 0, (float*)mat->data_device, (float*)target->data_device, w, h, std::numeric_limits<float>::min()); if (mat->dtype == 1)hipLaunchKernelGGL(( kMaxColumnwise<doubl>), dim3(w),dim3(32), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, w, h, std::numeric_limits<doubl>::min()); if (mat->dtype == 2)hipLaunchKernelGGL(( kMaxColumnwise<ubyte>), dim3(w),dim3(32), 0, 0, (ubyte*)mat->data_device, (ubyte*)target->data_device, w, h, 0); if (mat->dtype == 3)hipLaunchKernelGGL(( kMaxColumnwise<unsig>), dim3(w),dim3(32), 0, 0, (unsig*)mat->data_device, (unsig*)target->data_device, w, h, 0); CUDA_THREAD_SYNC(); } else return ERROR_UNSUPPORTED; if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int min_by_axis(cudamat* mat, cudamat* target, int axis) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype == 0)hipLaunchKernelGGL(( kMinColumnwise<float>), dim3(w),dim3(32), 0, 0, (float*)mat->data_device, (float*)target->data_device, w, h, std::numeric_limits<float>::max()); if (mat->dtype == 1)hipLaunchKernelGGL(( kMinColumnwise<doubl>), dim3(w),dim3(32), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, w, h, std::numeric_limits<doubl>::max()); if (mat->dtype == 2)hipLaunchKernelGGL(( kMinColumnwise<ubyte>), dim3(w),dim3(32), 0, 0, (ubyte*)mat->data_device, (ubyte*)target->data_device, w, h, 0); if (mat->dtype == 3)hipLaunchKernelGGL(( kMinColumnwise<unsig>), dim3(w),dim3(32), 0, 0, (unsig*)mat->data_device, (unsig*)target->data_device, w, h, 0); CUDA_THREAD_SYNC(); } else return ERROR_UNSUPPORTED; if (checkCUDAError()) 
return CUDA_ERROR; return 0; }
/* sign: elementwise sign of mat into target via kSign<T>; only float and
   doubl are supported (signs of unsigned types are not meaningful). */
DLLEXP extern int sign(cudamat* mat, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kSign<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kSign<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_relu: target = max(mat, 0); when dtarget is non-NULL, kApplyReluD
   additionally fills dtarget (the ReLU derivative buffer).  When dtarget
   is NULL this simply delegates to maximum_scalar(mat, 0, target).
   Only float and doubl dtypes are supported on the kApplyReluD path. */
DLLEXP extern int apply_relu(cudamat* mat, cudamat* target, cudamat* dtarget) { unsigned int len = mat->size[0] * mat->size[1]; if (!dtarget) return maximum_scalar(mat,0,target); if (!mat->on_device || !target->on_device || !dtarget->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->size[0] != dtarget->size[0] || mat->size[1] != dtarget->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype || mat->dtype != dtarget->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kApplyReluD<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, (float*)dtarget->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kApplyReluD<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, (doubl*)dtarget->data_device, len); else return
ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_sigmoid: target = sigmoid(mat) via kApplySigmoid (float/doubl). */
DLLEXP extern int apply_sigmoid(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kApplySigmoid<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kApplySigmoid<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_sigmoid_deriv: sigmoid derivative via kApplySigmoidDeriv
   (float/doubl).  NOTE(review): whether mat holds pre-activations or
   sigmoid outputs is defined by the kernel -- confirm there. */
DLLEXP extern int apply_sigmoid_deriv(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kApplySigmoidDeriv<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kApplySigmoidDeriv<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_tanh: target = tanh(mat) via kApplyTanh (float/doubl). */
DLLEXP extern int apply_tanh(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device ||
!target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kApplyTanh<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kApplyTanh<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_tanh_deriv: tanh derivative via kApplyTanhDeriv (float/doubl). */
DLLEXP extern int apply_tanh_deriv(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kApplyTanhDeriv<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kApplyTanhDeriv<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_abs: target = |mat| for float/doubl.  NOTE(review): for unsigned
   dtypes no kernel is launched AND target is not written -- if target is
   a distinct matrix it is left unchanged rather than receiving a copy of
   mat; confirm callers only pass target == mat for unsigned dtypes. */
DLLEXP extern int apply_abs(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL((
kApplyAbs<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); if (mat->dtype == 1)hipLaunchKernelGGL(( kApplyAbs<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); /* do nothing for unsigned types */ CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_log_1_plus_exp: target = log(1 + exp(mat)) (softplus) via
   kApplyLog1PlusExp (float/doubl). */
DLLEXP extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kApplyLog1PlusExp<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kApplyLog1PlusExp<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; }
/* apply_log: target = log(mat) via kLog (float/doubl); the function body
   continues past this chunk. */
DLLEXP extern int apply_log(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kLog<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kLog<doubl>), 
dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kExp<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kExp<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_sqrt(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kSqrt<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kSqrt<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else if (mat->dtype == 2)hipLaunchKernelGGL(( kSqrt<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, 
(ubyte*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int square(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kSquare<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kSquare<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else if (mat->dtype == 2)hipLaunchKernelGGL(( kSquare<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_pow(cudamat* mat, double pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kPow<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)pow, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kPow<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)pow, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; 
CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype || mat->dtype != pow->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kPowMatrix<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)pow->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kPowMatrix<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)pow->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int reciprocal(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kReciprocal<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1)hipLaunchKernelGGL(( kReciprocal<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } 
/* target = beta*target + alpha * mat1 * mat2 via (hip)BLAS GEMM.
 * float/double only; matrices are column-major (legacy BLAS convention). */
DLLEXP extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, double beta, double alpha) {
  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->dtype != mat2->dtype || mat1->dtype != target->dtype)
    return ERROR_MISMATCHED_DTYPE;
  if (mat1->dtype >= 2 || mat2->dtype >= 2 || target->dtype >= 2)
    return ERROR_DTYPE_UNSUPPORTED;
  if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
      get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
      get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  }

  int m = get_leading_dimension(mat1);
  int k = get_leading_dimension(mat2);
  int n = get_nonleading_dimension(mat2);

  if (mat1->dtype == 0) {
    hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2), m, n, k,
                 (float)alpha, (const float*)mat1->data_device, mat1->size[0],
                 (const float*)mat2->data_device, mat2->size[0],
                 (float)beta, (float*)target->data_device, target->size[0]);
  } else {
    hipblasDgemm(get_transpose_char(mat1), get_transpose_char(mat2), m, n, k,
                 (doubl)alpha, (const doubl*)mat1->data_device, mat1->size[0],
                 (const doubl*)mat2->data_device, mat2->size[0],
                 (doubl)beta, (doubl*)target->data_device, target->size[0]);
  }

  if (check_cublas_error())
    return CUBLAS_ERROR;
  return 0;
}

/* Vector dot product of the two (flattened) matrices. float/double only.
 * NOTE(review): some failure paths return an error code as the double result
 * instead of via err_code — preserved for compatibility with existing callers. */
DLLEXP extern double vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
  int len = mat1->size[0] * mat1->size[1];
  double res;

  if (!mat1->on_device || !mat2->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans) {
    *err_code = ERROR_TRANSPOSEDNESS;
    return 0;
  }
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
    *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
    return 0;
  }
  if (mat1->dtype != mat2->dtype)
    return ERROR_MISMATCHED_DTYPE;
  if (mat1->dtype >= 2 || mat2->dtype >= 2)
    return ERROR_DTYPE_UNSUPPORTED;

  if (mat1->dtype == 0)
    res = hipblasSdot(len, (const float*)mat1->data_device, 1, (const float*)mat2->data_device, 1);
  if (mat1->dtype == 1)
    res = hipblasDdot(len, (const doubl*)mat1->data_device, 1, (const doubl*)mat2->data_device, 1);

  if (check_cublas_error()) {
    *err_code = CUBLAS_ERROR;
    return -1.;
  } else {
    *err_code = 0;
    return res;
  }
}

/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must have
 * the same transposedness. float/double only (BLAS axpy). */
DLLEXP extern int add_mult(cudamat* mat1, cudamat* mat2, double alpha) {
  int len = mat1->size[0] * mat1->size[1];

  if (!mat1->on_device || !mat2->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != mat2->dtype)
    return ERROR_MISMATCHED_DTYPE;
  if (mat1->dtype >= 2 || mat2->dtype >= 2)
    return ERROR_DTYPE_UNSUPPORTED;

  if (mat1->dtype == 0)
    hipblasSaxpy(len, (float)alpha, (const float*)mat2->data_device, 1, (float*)mat1->data_device, 1);
  else
    hipblasDaxpy(len, (doubl)alpha, (const doubl*)mat2->data_device, 1, (doubl*)mat1->data_device, 1);

  if (check_cublas_error())
    return CUBLAS_ERROR;
  return 0;
}

/* target = mat1 + mat2^T. Grid is tiled COPY_BLOCK_SIZE x COPY_BLOCK_SIZE over mat2. */
DLLEXP extern int add_transpose(cudamat* mat1, cudamat* mat2, cudamat* target) {
  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[1] || mat1->size[1] != mat2->size[0] ||
      mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != target->dtype || mat2->dtype != target->dtype)
    return ERROR_MISMATCHED_DTYPE;

  unsigned int height = mat2->size[0];
  unsigned int width = mat2->size[1];

  /* ceil-div grid over mat2's shape */
  unsigned int grid_x = height / COPY_BLOCK_SIZE;
  if (height % COPY_BLOCK_SIZE) grid_x++;
  unsigned int grid_y = width / COPY_BLOCK_SIZE;
  if (width % COPY_BLOCK_SIZE) grid_y++;
  dim3 grid(grid_x, grid_y, 1);
  dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);

  if (mat1->dtype == 0)
    hipLaunchKernelGGL((kAddTrans<float>), dim3(grid), dim3(threads), 0, 0,
                       (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, height, width);
  if (mat1->dtype == 1)
    hipLaunchKernelGGL((kAddTrans<doubl>), dim3(grid), dim3(threads), 0, 0,
                       (doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, height, width);
  if (mat1->dtype == 2)
    hipLaunchKernelGGL((kAddTrans<ubyte>), dim3(grid), dim3(threads), 0, 0,
                       (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, height, width);
  if (mat1->dtype == 3)
    hipLaunchKernelGGL((kAddTrans<unsig>), dim3(grid), dim3(threads), 0, 0,
                       (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, height, width);

  CUDA_THREAD_SYNC();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}

/* target = mat1 + mat2, elementwise. All four dtypes supported. */
DLLEXP extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
  int len = mat1->size[0] * mat1->size[1];

  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
      mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != target->dtype || mat2->dtype != target->dtype)
    return ERROR_MISMATCHED_DTYPE;

  if (mat1->dtype == 0)
    hipLaunchKernelGGL((kAdd<float>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len);
  if (mat1->dtype == 1)
    hipLaunchKernelGGL((kAdd<doubl>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len);
  if (mat1->dtype == 2)
    hipLaunchKernelGGL((kAdd<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len);
  if (mat1->dtype == 3)
    hipLaunchKernelGGL((kAdd<unsig>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len);

  CUDA_THREAD_SYNC();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}

/* target = mat1 - mat2, elementwise. All four dtypes supported. */
DLLEXP extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
  int len = mat1->size[0] * mat1->size[1];

  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
      mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != target->dtype || mat2->dtype != target->dtype)
    return ERROR_MISMATCHED_DTYPE;

  if (mat1->dtype == 0)
    hipLaunchKernelGGL((kSubtract<float>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len);
  if (mat1->dtype == 1)
    hipLaunchKernelGGL((kSubtract<doubl>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len);
  if (mat1->dtype == 2)
    hipLaunchKernelGGL((kSubtract<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len);
  if (mat1->dtype == 3)
    hipLaunchKernelGGL((kSubtract<unsig>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len);

  CUDA_THREAD_SYNC();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}

/* target = mat1 - mat2^T. Same tiling scheme as add_transpose. */
DLLEXP extern int subtract_transpose(cudamat* mat1, cudamat* mat2, cudamat* target) {
  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[1] || mat1->size[1] != mat2->size[0] ||
      mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != target->dtype || mat2->dtype != target->dtype)
    return ERROR_MISMATCHED_DTYPE;

  unsigned int height = mat2->size[0];
  unsigned int width = mat2->size[1];

  /* ceil-div grid over mat2's shape */
  unsigned int grid_x = height / COPY_BLOCK_SIZE;
  if (height % COPY_BLOCK_SIZE) grid_x++;
  unsigned int grid_y = width / COPY_BLOCK_SIZE;
  if (width % COPY_BLOCK_SIZE) grid_y++;
  dim3 grid(grid_x, grid_y, 1);
  dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);

  if (mat1->dtype == 0)
    hipLaunchKernelGGL((kSubtractTrans<float>), dim3(grid), dim3(threads), 0, 0,
                       (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, height, width);
  if (mat1->dtype == 1)
    hipLaunchKernelGGL((kSubtractTrans<doubl>), dim3(grid), dim3(threads), 0, 0,
                       (doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, height, width);
  if (mat1->dtype == 2)
    hipLaunchKernelGGL((kSubtractTrans<ubyte>), dim3(grid), dim3(threads), 0, 0,
                       (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, height, width);
  if (mat1->dtype == 3)
    hipLaunchKernelGGL((kSubtractTrans<unsig>), dim3(grid), dim3(threads), 0, 0,
                       (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, height, width);

  CUDA_THREAD_SYNC();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}

/* target = mat1 / mat2, elementwise. All four dtypes supported. */
DLLEXP extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
  int len = mat1->size[0] * mat1->size[1];

  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
      mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != target->dtype || mat2->dtype != target->dtype)
    return ERROR_MISMATCHED_DTYPE;

  if (mat1->dtype == 0)
    hipLaunchKernelGGL((kDivide<float>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len);
  if (mat1->dtype == 1)
    hipLaunchKernelGGL((kDivide<doubl>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len);
  if (mat1->dtype == 2)
    hipLaunchKernelGGL((kDivide<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len);
  if (mat1->dtype == 3)
    hipLaunchKernelGGL((kDivide<unsig>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len);

  CUDA_THREAD_SYNC();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}

/* Elementwise multiplication of 2 matrices. All four dtypes supported. */
DLLEXP extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
  int len = mat1->size[0] * mat1->size[1];

  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
      mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != target->dtype || mat2->dtype != target->dtype)
    return ERROR_MISMATCHED_DTYPE;

  if (mat1->dtype == 0)
    hipLaunchKernelGGL((kMult<float>), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
                       (float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len);
  if (mat1->dtype == 1)
    hipLaunchKernelGGL((kMult<doubl>),
dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len); if (mat1->dtype == 2)hipLaunchKernelGGL(( kMult<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3)hipLaunchKernelGGL(( kMult<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int assign_array(cudamat* src, cudamat* dst) { int len = src->size[0]*src->size[1]; if (!src->on_device || !dst->on_device) return ERROR_NOT_ON_DEVICE; if (src->size[0] != dst->size[0] || src->size[1] != dst->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (src->dtype == 0) { if (dst->dtype == 1)hipLaunchKernelGGL(( kAssignArray<float,doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)src->data_device, (doubl*)dst->data_device, len); if (dst->dtype == 2)hipLaunchKernelGGL(( kAssignArray<float,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)src->data_device, (ubyte*)dst->data_device, len); if (dst->dtype == 3)hipLaunchKernelGGL(( kAssignArray<float,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)src->data_device, (unsig*)dst->data_device, len); } else if (src->dtype == 1) { if (dst->dtype == 0)hipLaunchKernelGGL(( kAssignArray<doubl,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)src->data_device, (float*)dst->data_device, len); if (dst->dtype == 2)hipLaunchKernelGGL(( kAssignArray<doubl,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)src->data_device, (ubyte*)dst->data_device, len); if 
(dst->dtype == 3)hipLaunchKernelGGL(( kAssignArray<doubl,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)src->data_device, (unsig*)dst->data_device, len); } else if (src->dtype == 2) { if (dst->dtype == 0)hipLaunchKernelGGL(( kAssignArray<ubyte,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)src->data_device, (float*)dst->data_device, len); if (dst->dtype == 1)hipLaunchKernelGGL(( kAssignArray<ubyte,doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)src->data_device, (doubl*)dst->data_device, len); if (dst->dtype == 3)hipLaunchKernelGGL(( kAssignArray<ubyte,unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)src->data_device, (unsig*)dst->data_device, len); } else if (src->dtype == 3) { if (dst->dtype == 0)hipLaunchKernelGGL(( kAssignArray<unsig,float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)src->data_device, (float*)dst->data_device, len); if (dst->dtype == 1)hipLaunchKernelGGL(( kAssignArray<unsig,doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)src->data_device, (doubl*)dst->data_device, len); if (dst->dtype == 2)hipLaunchKernelGGL(( kAssignArray<unsig,ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)src->data_device, (ubyte*)dst->data_device, len); } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int assign_scalar(cudamat* mat, double alpha) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (mat->dtype == 0)hipLaunchKernelGGL(( kAssignScalar<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)alpha, len); if (mat->dtype == 1)hipLaunchKernelGGL(( kAssignScalar<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, 
(doubl*)mat->data_device, (doubl)alpha, len); if (mat->dtype == 2)hipLaunchKernelGGL(( kAssignScalar<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)alpha, len); if (mat->dtype == 3)hipLaunchKernelGGL(( kAssignScalar<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)alpha, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int mult_by_scalar(cudamat* mat, double alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kMultScalar<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)alpha, (float*)target->data_device, len); if (mat->dtype == 1)hipLaunchKernelGGL(( kMultScalar<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)alpha, (doubl*)target->data_device, len); if (mat->dtype == 2)hipLaunchKernelGGL(( kMultScalar<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)alpha, (ubyte*)target->data_device, len); if (mat->dtype == 3)hipLaunchKernelGGL(( kMultScalar<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)alpha, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int divide_by_scalar(cudamat* mat, double alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != 
target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kDivideScalar<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)alpha, (float*)target->data_device, len); if (mat->dtype == 1)hipLaunchKernelGGL(( kDivideScalar<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)alpha, (doubl*)target->data_device, len); if (mat->dtype == 2)hipLaunchKernelGGL(( kDivideScalar<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)alpha, (ubyte*)target->data_device, len); if (mat->dtype == 3)hipLaunchKernelGGL(( kDivideScalar<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)alpha, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int add_scalar(cudamat* mat, double alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0)hipLaunchKernelGGL(( kAddScalar<float>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (float*)mat->data_device, (float)alpha, (float*)target->data_device, len); if (mat->dtype == 1)hipLaunchKernelGGL(( kAddScalar<doubl>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (doubl*)mat->data_device, (doubl)alpha, (doubl*)target->data_device, len); if (mat->dtype == 2)hipLaunchKernelGGL(( kAddScalar<ubyte>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (ubyte*)mat->data_device, (ubyte)alpha, 
(ubyte*)target->data_device, len); if (mat->dtype == 3)hipLaunchKernelGGL(( kAddScalar<unsig>), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, (unsig*)mat->data_device, (unsig)alpha, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern double euclid_norm(cudamat* mat, int* err_code) { int len = mat->size[0]*mat->size[1]; if (mat->dtype >= 2) return ERROR_DTYPE_UNSUPPORTED; double res; if (mat->dtype == 0) res = hipblasSnrm2(len, (const float*)mat->data_device, 1); else res = hipblasDnrm2(len, (const doubl*)mat->data_device, 1); if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (check_cublas_error()) { *err_code = CUBLAS_ERROR; return -1.; } else { *err_code = 0; return res; } } DLLEXP extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){ const int nRetRows = indices->size[1]; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (indices->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; if (nRetRows==0) return 0; dim3 gridDim((nRetRows+31)/32); dim3 blockDim(32); // TODO: support integer indices if (source->dtype == 0)hipLaunchKernelGGL(( kSelectRows<float>), dim3(gridDim), dim3(blockDim), 0, 0, (float*)source->data_device, (float*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]); if (source->dtype == 1)hipLaunchKernelGGL(( kSelectRows<doubl>), dim3(gridDim), dim3(blockDim), 0, 0, (doubl*)source->data_device, (doubl*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]); if (source->dtype == 2)hipLaunchKernelGGL(( kSelectRows<ubyte>), dim3(gridDim), dim3(blockDim), 0, 0, (ubyte*)source->data_device, (ubyte*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]); if (source->dtype == 3)hipLaunchKernelGGL(( kSelectRows<unsig>), dim3(gridDim), dim3(blockDim), 0, 0, (unsig*)source->data_device, 
(unsig*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){ const int nSetRows = indices->size[1]; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (indices->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; if (nSetRows==0) return 0; dim3 gridDim((nSetRows+31)/32); dim3 blockDim(32); if (source->dtype == 0)hipLaunchKernelGGL(( kSetSelectedRows<float>), dim3(gridDim), dim3(blockDim), 0, 0, (float*)target->data_device, (float*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); if (source->dtype == 1)hipLaunchKernelGGL(( kSetSelectedRows<doubl>), dim3(gridDim), dim3(blockDim), 0, 0, (doubl*)target->data_device, (doubl*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); if (source->dtype == 2)hipLaunchKernelGGL(( kSetSelectedRows<ubyte>), dim3(gridDim), dim3(blockDim), 0, 0, (ubyte*)target->data_device, (ubyte*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); if (source->dtype == 3)hipLaunchKernelGGL(( kSetSelectedRows<unsig>), dim3(gridDim), dim3(blockDim), 0, 0, (unsig*)target->data_device, (unsig*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int dropout(rnd_struct* rnd_state, cudamat* matA, cudamat* matB, float rate, cudamat* targetA, cudamat* targetB) { unsigned int len = matA->size[0] * matA->size[1]; if (!matA->on_device || !targetA->on_device) return ERROR_NOT_ON_DEVICE; if (matA->size[0] != targetA->size[0] || matA->size[1] != targetA->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (matA->dtype != targetA->dtype) return ERROR_MISMATCHED_DTYPE; if (matB) { if 
(!matB->on_device || !targetB->on_device) return ERROR_NOT_ON_DEVICE; if (matB->dtype != targetB->dtype) return ERROR_MISMATCHED_DTYPE; if (matB->size[0] != targetB->size[0] || matB->size[1] != targetB->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (matA->size[0] != matB->size[0] || matA->size[1] != matB->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (matB->dtype >= 2) return ERROR_DTYPE_UNSUPPORTED; } if (matA->dtype == 0) { if (matB && matB->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; hipLaunchKernelGGL(( kDropout<float,float>), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, (float*)matA->data_device, (float*)(matB ? matB : matA)->data_device, rate, (float*)targetA->data_device, (float*)(targetB ? targetB : targetA)->data_device, len); } else if (matA->dtype == 1) { if (matB && matB->dtype != 1) return ERROR_DTYPE_UNSUPPORTED; hipLaunchKernelGGL(( kDropout<doubl,doubl>), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, (doubl*)matA->data_device, (doubl*)(matB ? matB : matA)->data_device, rate, (doubl*)targetA->data_device, (doubl*)(targetB ? targetB : targetA)->data_device, len); } else if (matA->dtype == 2) { if (!matB) return ERROR_DTYPE_UNSUPPORTED; if (matB->dtype == 0) hipLaunchKernelGGL(( kDropout<ubyte,float>), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, (ubyte*)matA->data_device, (float*)(matB ? matB : matA)->data_device, rate, (ubyte*)targetA->data_device, (float*)(targetB ? targetB : targetA)->data_device, len); if (matB->dtype == 1) hipLaunchKernelGGL(( kDropout<ubyte,doubl>), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, (ubyte*)matA->data_device, (doubl*)(matB ? matB : matA)->data_device, rate, (ubyte*)targetA->data_device, (doubl*)(targetB ? 
targetB : targetA)->data_device, len); } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } }
216d63f89735a1d913241341971efbd5c6e75cc6.cu
#include <stdio.h> #include <stdlib.h> #include <cublas.h> #include <cuda.h> #include <limits> //#include "cudamat_kernels.cuh" #include "cudamat_a.cuh" #include "cudamat_kernels.cu" //#define AUTO_CUDA_SYNC #ifdef AUTO_CUDA_SYNC #define CUDA_THREAD_SYNC() cudaThreadSynchronize(); #else #define CUDA_THREAD_SYNC() #endif #ifdef _MSC_VER #define DLLEXP __declspec(dllexport) #else #define DLLEXP #endif extern "C" { typedef unsigned char ubyte; typedef double doubl; typedef unsigned int unsig; inline int dtype_size(int dtype) { if (dtype == 0) return sizeof(float); if (dtype == 1) return sizeof(double); if (dtype == 2) return sizeof(unsigned char); if (dtype == 3) return sizeof(unsigned int); return -1; } /* ------------------------------ CUBLAS init/shutdown ------------------------------ */ inline bool check_cublas_error() { cublasStatus status = cublasGetError(); return status != CUBLAS_STATUS_SUCCESS; } inline bool checkCUDAError(cudaError_t err = cudaSuccess) { if (err == cudaSuccess) err = cudaGetLastError(); if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err)); return cudaSuccess != err; } DLLEXP extern const char* get_last_cuda_error() { cudaError_t err = cudaGetLastError(); return cudaGetErrorString( err); } DLLEXP extern int cublas_init() { cublasInit(); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } DLLEXP extern int cublas_shutdown() { cublasShutdown(); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } DLLEXP extern void cuda_device_reset() { checkCUDAError(cudaDeviceReset()); } DLLEXP extern int cuda_get_device(int* device_id) { cudaError_t error = cudaGetDevice(device_id); return error? CUDA_ERROR : 0; } DLLEXP extern int cuda_get_device_count() { int count = 0; cudaGetDeviceCount(&count); return count; } DLLEXP extern int cuda_get_device_prop(cudaDeviceProp* prop, int device) { cudaError_t error = cudaGetDeviceProperties(prop,device); return error ? 
CUDA_ERROR : 0; } DLLEXP extern size_t cuda_memory_available() { // force device to be ready for cuMemGetInfo void* ptr = 0; cudaMalloc(&ptr,128); cudaFree(ptr); size_t free = 0, total = 0; CUresult err = cuMemGetInfo(&free,&total); return free; } DLLEXP extern size_t cuda_memory_total() { // force device to be ready for cuMemGetInfo void* ptr = 0; cudaMalloc(&ptr,128); cudaFree(ptr); size_t free = 0, total = 0; CUresult err = cuMemGetInfo(&free,&total); return total; } DLLEXP extern int cuda_set_device(int deviceId) { cudaError_t error = cudaSetDevice(deviceId); return checkCUDAError(error) ? CUDA_ERROR : 0; } DLLEXP extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) { unsigned int * host_mults; host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int)); FILE * pFile; pFile = fopen (cudamatpath,"r"); for (int i = 0; i < NUM_RND_STREAMS; i++) { fscanf (pFile, "%u", &host_mults[i]); } fclose (pFile); cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults); cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words); cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1); //cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int)); //cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long)); //cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaThreadSynchronize(); kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Utility routines ------------------------------ */ DLLEXP extern int get_leading_dimension(cudamat* mat) { return mat->is_trans ? 
mat->size[1] : mat->size[0]; } DLLEXP extern int get_nonleading_dimension(cudamat* mat) { return mat->is_trans ? mat->size[0] : mat->size[1]; } DLLEXP extern void set_transpose(cudamat* mat, int is_trans) { mat->is_trans = is_trans; } inline char get_transpose_char(cudamat* mat) { return mat->is_trans ? 't' : 'n'; } DLLEXP extern void cuda_sync_threads() { cudaThreadSynchronize(); } /* ------------------------------ Allocating/moving data ------------------------------ */ DLLEXP extern int allocate_device_memory(cudamat* mat) { int len = mat->size[0]*mat->size[1]; cublasStatus stat; if (dtype_size(mat->dtype) <= 0) return ERROR_DTYPE_UNSUPPORTED; stat = cublasAlloc(len, dtype_size(mat->dtype), &mat->data_device); if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) return CUBLAS_ERROR; mat->on_device = 1; return 0; } DLLEXP extern int copy_to_host(cudamat* mat) { int len = mat->size[0]*mat->size[1]; if (mat->on_device) { cublasGetVector(len, dtype_size(mat->dtype), mat->data_device, 1, mat->data_host, 1); if (check_cublas_error()) return CUBLAS_ERROR; } else return ERROR_NOT_ON_DEVICE; return 0; } DLLEXP extern int copy_to_device(cudamat* mat) { int len = mat->size[0]*mat->size[1]; int err_code = 0; //if (!mat->owns_data) // return VIEW_ERROR; if (!mat->on_device) { err_code = allocate_device_memory(mat); if (err_code) return err_code; } cublasSetVector(len, dtype_size(mat->dtype), mat->data_host, 1, mat->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; return 0; } DLLEXP extern int copy_on_device(cudamat* mat1, cudamat* mat2) { int len = mat1->size[0]*mat1->size[1]; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype) return ERROR_MISMATCHED_DTYPE; //cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1); cudaMemcpy(mat2->data_device,mat1->data_device,len*dtype_size(mat1->dtype),cudaMemcpyDeviceToDevice); if (check_cublas_error()) return CUBLAS_ERROR; 
else return 0; } DLLEXP extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { unsigned int height = source->size[0]; unsigned int width = source->size[1]; if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); if (source->dtype == 0) kGetRowSlice<float><<<kernelBlockGrid,kernelBlockDim>>>((float*)source->data_device, (float*)target->data_device, start, end, width, height); if (source->dtype == 1) kGetRowSlice<doubl><<<kernelBlockGrid,kernelBlockDim>>>((doubl*)source->data_device, (doubl*)target->data_device, start, end, width, height); if (source->dtype == 2) kGetRowSlice<ubyte><<<kernelBlockGrid,kernelBlockDim>>>((ubyte*)source->data_device, (ubyte*)target->data_device, start, end, width, height); if (source->dtype == 3) kGetRowSlice<unsig><<<kernelBlockGrid,kernelBlockDim>>>((unsig*)source->data_device, (unsig*)target->data_device, start, end, width, height); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { unsigned int height = target->size[0]; unsigned int width = target->size[1]; if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); if (source->dtype == 0) kSetRowSlice<float><<<kernelBlockGrid,kernelBlockDim>>>((float*)source->data_device, (float*)target->data_device, start, end, width, height); if (source->dtype == 1) 
kSetRowSlice<doubl><<<kernelBlockGrid,kernelBlockDim>>>((doubl*)source->data_device, (doubl*)target->data_device, start, end, width, height); if (source->dtype == 2) kSetRowSlice<ubyte><<<kernelBlockGrid,kernelBlockDim>>>((ubyte*)source->data_device, (ubyte*)target->data_device, start, end, width, height); if (source->dtype == 3) kSetRowSlice<unsig><<<kernelBlockGrid,kernelBlockDim>>>((unsig*)source->data_device, (unsig*)target->data_device, start, end, width, height); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int copy_transpose(cudamat* source, cudamat* target) { unsigned int height = source->size[0]; unsigned int width = source->size[1]; if (source->size[0] != target->size[1] || source->size[1] != target->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; // setup execution parameters unsigned int grid_x = height / COPY_BLOCK_SIZE; if (height % COPY_BLOCK_SIZE) grid_x++; unsigned int grid_y = width / COPY_BLOCK_SIZE; if (width % COPY_BLOCK_SIZE) grid_y++; dim3 grid(grid_x, grid_y, 1); dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1); if (source->dtype == 0) kTranspose<float><<< grid, threads >>>((float*)target->data_device, (float*)source->data_device, height, width); if (source->dtype == 1) kTranspose<doubl><<< grid, threads >>>((doubl*)target->data_device, (doubl*)source->data_device, height, width); if (source->dtype == 2) kTranspose<ubyte><<< grid, threads >>>((ubyte*)target->data_device, (ubyte*)source->data_device, height, width); if (source->dtype == 3) kTranspose<unsig><<< grid, threads >>>((unsig*)target->data_device, (unsig*)source->data_device, height, width); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int free_device_memory(cudamat* mat) { if (mat->owns_data && mat->on_device) { cublasStatus stat; stat = cublasFree(mat->data_device); mat->on_device = 0; if (stat != CUBLAS_STATUS_SUCCESS || 
check_cublas_error()) return CUBLAS_ERROR; } return 0; } DLLEXP extern int reshape(cudamat* mat, unsigned int m, unsigned int n) { if (mat->size[0] * mat->size[1] != m * n) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->size[0] = m; mat->size[1] = n; return 0; } DLLEXP extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; int num_rows = source->size[0]; target->data_host = 0; target->data_device = (unsigned char*)source->data_device + first_col * num_rows * dtype_size(source->dtype); target->on_device = 1; target->on_host = 0; target->size[0] = source->size[0]; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 0; target->dtype = source->dtype; return 0; } DLLEXP extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) { // source must be a vector if (source->size[0] > 1 && source->size[1] > 1) return ERROR_GENERIC; if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (first_ind >= last_ind) return ERROR_INCOMPATIBLE_DIMENSIONS; unsigned int num_rows = source->size[0]; target->data_host = 0; target->data_device = (unsigned char*)source->data_device + first_ind * num_rows * dtype_size(source->dtype); target->on_device = 1; target->on_host = 0; target->is_trans = 0; target->owns_data = 0; target->dtype = source->dtype; if (source->size[0] > 1) { if (last_ind > source->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = last_ind - first_ind; target->size[1] = 1; } else { if (last_ind > source->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = 1; target->size[1] = last_ind - first_ind; } return 0; } /* ------------------------------ Initialization routines 
------------------------------ */ DLLEXP extern void init_from_array(cudamat* mat, void* data, int m, int n, int dtype) { mat->data_host = data; mat->dtype = dtype; mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 1; mat->is_trans = 0; mat->owns_data = 1; } DLLEXP extern int init_empty(cudamat* mat, int m, int n, int dtype) { mat->dtype = dtype; mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 0; mat->is_trans = 0; mat->owns_data = 1; return allocate_device_memory(mat); } /* ------------------------------ Random number generation ------------------------------ */ DLLEXP extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (mat->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, (float*)mat->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (mat->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, (float*)mat->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Algebraic operations ------------------------------ */ DLLEXP extern int diff_cols(cudamat* mat, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]+1) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) 
kDiffCols<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1) kDiffCols<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kDiffCols<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) kDiffCols<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; } DLLEXP extern int diff_rows(cudamat* mat, cudamat* target) { return ERROR_UNSUPPORTED; // TODO } DLLEXP extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kAddColVector<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1) kAddColVector<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kAddColVector<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) kAddColVector<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, 
(unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; } DLLEXP extern int sub_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kSubColVector<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1) kSubColVector<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kSubColVector<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) kSubColVector<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; } DLLEXP extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, double mult) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) 
kAddColMult<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, (float)mult, w, h); if (mat->dtype == 1) kAddColMult<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, (doubl)mult, w, h); if (mat->dtype == 2) kAddColMult<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, (ubyte)mult, w, h); if (mat->dtype == 3) kAddColMult<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, (unsig)mult, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kAddRowVector<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1) kAddRowVector<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kAddRowVector<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) 
kAddRowVector<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int sub_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kSubRowVector<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1) kSubRowVector<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kSubRowVector<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) kSubRowVector<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || 
mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kMultByColVector<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1) kMultByColVector<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kMultByColVector<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) kMultByColVector<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kMultByRowVector<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float*)target->data_device, w, h); if (mat->dtype == 1) kMultByRowVector<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kMultByRowVector<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) 
kMultByRowVector<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int clip_norm(cudamat* mat, cudamat* vec, double eps, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != vec->dtype || mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kClipNorm<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)vec->data_device, (float)eps, (float*)target->data_device, w, h); if (mat->dtype == 1) kClipNorm<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)vec->data_device, (doubl)eps, (doubl*)target->data_device, w, h); if (mat->dtype == 2) kClipNorm<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)vec->data_device, (ubyte)eps, (ubyte*)target->data_device, w, h); if (mat->dtype == 3) kClipNorm<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig*)vec->data_device, (unsig)eps, (unsig*)target->data_device, w, h); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) 
return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype) return ERROR_MISMATCHED_DTYPE; if (target->dtype == 0) { if (mat1->dtype == 0) kLessThan<float,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) kLessThan<doubl,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 2) kLessThan<ubyte,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 3) kLessThan<unsig,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat1->dtype == 0) kLessThan<float,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 1) kLessThan<doubl,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 2) kLessThan<ubyte,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) kLessThan<unsig,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat1->dtype == 0) kLessThan<float,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 1) 
kLessThan<doubl,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 2) kLessThan<ubyte,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 3) kLessThan<unsig,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int less_than_scalar(cudamat* mat, double val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (target->dtype == 0) { if (mat->dtype == 0) kLessThanScalar<float,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)val, (float*)target->data_device, len); if (mat->dtype == 1) kLessThanScalar<doubl,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)val, (float*)target->data_device, len); if (mat->dtype == 2) kLessThanScalar<ubyte,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)val, (float*)target->data_device, len); if (mat->dtype == 3) kLessThanScalar<unsig,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)val, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat->dtype == 0) kLessThanScalar<float,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)val, (ubyte*)target->data_device, len); 
if (mat->dtype == 1) kLessThanScalar<doubl,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)val, (ubyte*)target->data_device, len); if (mat->dtype == 2) kLessThanScalar<ubyte,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)val, (ubyte*)target->data_device, len); if (mat->dtype == 3) kLessThanScalar<unsig,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)val, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat->dtype == 0) kLessThanScalar<float,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)val, (unsig*)target->data_device, len); if (mat->dtype == 1) kLessThanScalar<doubl,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)val, (unsig*)target->data_device, len); if (mat->dtype == 2) kLessThanScalar<ubyte,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)val, (unsig*)target->data_device, len); if (mat->dtype == 3) kLessThanScalar<unsig,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)val, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype) return ERROR_MISMATCHED_DTYPE; if (target->dtype == 0) { if (mat1->dtype == 0) 
kGreaterThan<float,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) kGreaterThan<doubl,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 2) kGreaterThan<ubyte,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 3) kGreaterThan<unsig,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat1->dtype == 0) kGreaterThan<float,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 1) kGreaterThan<doubl,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 2) kGreaterThan<ubyte,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) kGreaterThan<unsig,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat1->dtype == 0) kGreaterThan<float,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 1) kGreaterThan<doubl,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (unsig*)target->data_device, len); if 
(mat1->dtype == 2) kGreaterThan<ubyte,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (unsig*)target->data_device, len); if (mat1->dtype == 3) kGreaterThan<unsig,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int greater_than_scalar(cudamat* mat, double val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (target->dtype == 0) { if (mat->dtype == 0) kGreaterThanScalar<float,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)val, (float*)target->data_device, len); if (mat->dtype == 1) kGreaterThanScalar<doubl,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)val, (float*)target->data_device, len); if (mat->dtype == 2) kGreaterThanScalar<ubyte,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)val, (float*)target->data_device, len); if (mat->dtype == 3) kGreaterThanScalar<unsig,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)val, (float*)target->data_device, len); } else if (target->dtype == 2) { if (mat->dtype == 0) kGreaterThanScalar<float,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)val, (ubyte*)target->data_device, len); if (mat->dtype == 1) kGreaterThanScalar<doubl,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)val, 
(ubyte*)target->data_device, len); if (mat->dtype == 2) kGreaterThanScalar<ubyte,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)val, (ubyte*)target->data_device, len); if (mat->dtype == 3) kGreaterThanScalar<unsig,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)val, (ubyte*)target->data_device, len); } else if (target->dtype == 3) { if (mat->dtype == 0) kGreaterThanScalar<float,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)val, (unsig*)target->data_device, len); if (mat->dtype == 1) kGreaterThanScalar<doubl,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)val, (unsig*)target->data_device, len); if (mat->dtype == 2) kGreaterThanScalar<ubyte,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)val, (unsig*)target->data_device, len); if (mat->dtype == 3) kGreaterThanScalar<unsig,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)val, (unsig*)target->data_device, len); } else { return ERROR_DTYPE_UNSUPPORTED; } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int maximum(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != mat2->dtype || mat1->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat1->dtype == 0) kMaximum<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, 
(float*)target->data_device, len); if (mat1->dtype == 1) kMaximum<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len); if (mat1->dtype == 2) kMaximum<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) kMaximum<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int maximum_scalar(cudamat* mat, double val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kMaximumScalar<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)val, (float*)target->data_device, len); if (mat->dtype == 1) kMaximumScalar<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)val, (doubl*)target->data_device, len); if (mat->dtype == 2) kMaximumScalar<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)val, (ubyte*)target->data_device, len); if (mat->dtype == 3) kMaximumScalar<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)val, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int max_by_axis(cudamat* mat, cudamat* target, int axis) { unsigned int h = mat->size[0], w = mat->size[1]; if 
(!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
  if (mat->is_trans) return ERROR_TRANSPOSED;
  if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE;
  if (axis == 0) {
    if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS;
    /* BUG FIX: the last kernel argument seeds the columnwise max reduction
       (the mirrored min_by_axis below passes numeric_limits<T>::max() to
       kMinColumnwise).  For floating-point T, std::numeric_limits<T>::min()
       is the smallest *positive* value, not the most negative one, so an
       all-negative column used to produce a wrong maximum.  Seed with the
       most negative representable value instead.  0 remains correct for the
       unsigned types. */
    if (mat->dtype == 0) kMaxColumnwise<float><<<w,32>>>((float*)mat->data_device, (float*)target->data_device, w, h, -std::numeric_limits<float>::max());
    if (mat->dtype == 1) kMaxColumnwise<doubl><<<w,32>>>((doubl*)mat->data_device, (doubl*)target->data_device, w, h, -std::numeric_limits<doubl>::max());
    if (mat->dtype == 2) kMaxColumnwise<ubyte><<<w,32>>>((ubyte*)mat->data_device, (ubyte*)target->data_device, w, h, 0);
    if (mat->dtype == 3) kMaxColumnwise<unsig><<<w,32>>>((unsig*)mat->data_device, (unsig*)target->data_device, w, h, 0);
    CUDA_THREAD_SYNC();
  } else
    return ERROR_UNSUPPORTED;
  if (checkCUDAError()) return CUDA_ERROR;
  return 0;
}

/* Columnwise minimum of mat written into the 1 x width row `target`
   (one 32-thread block per column).  Only axis == 0 is supported. */
DLLEXP extern int min_by_axis(cudamat* mat, cudamat* target, int axis) {
  unsigned int h = mat->size[0], w = mat->size[1];
  if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
  if (mat->is_trans) return ERROR_TRANSPOSED;
  if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE;
  if (axis == 0) {
    if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->dtype == 0) kMinColumnwise<float><<<w,32>>>((float*)mat->data_device, (float*)target->data_device, w, h, std::numeric_limits<float>::max());
    if (mat->dtype == 1) kMinColumnwise<doubl><<<w,32>>>((doubl*)mat->data_device, (doubl*)target->data_device, w, h, std::numeric_limits<doubl>::max());
    /* BUG FIX: a min reduction must be seeded with the largest value of the
       element type, not 0 — seeding with 0 made every unsigned column
       minimum come out as 0. */
    if (mat->dtype == 2) kMinColumnwise<ubyte><<<w,32>>>((ubyte*)mat->data_device, (ubyte*)target->data_device, w, h, std::numeric_limits<ubyte>::max());
    if (mat->dtype == 3) kMinColumnwise<unsig><<<w,32>>>((unsig*)mat->data_device, (unsig*)target->data_device, w, h, std::numeric_limits<unsig>::max());
    CUDA_THREAD_SYNC();
  } else
    return ERROR_UNSUPPORTED;
  if (checkCUDAError()) return CUDA_ERROR;
  return 0;
}

DLLEXP
extern int sign(cudamat* mat, cudamat* target) {
  int len = mat->size[0]*mat->size[1];
  if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
  if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS;
  if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE;
  /* Only the signed (floating-point) dtypes are meaningful here. */
  switch (mat->dtype) {
    case 0: kSign<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); break;
    case 1: kSign<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); break;
    default: return ERROR_DTYPE_UNSUPPORTED;
  }
  CUDA_THREAD_SYNC();
  if (checkCUDAError()) return CUDA_ERROR;
  return 0;
}

/* ReLU of mat into target; when dtarget is given the derivative is written
   there as well via kApplyReluD.  With dtarget == NULL this is exactly an
   elementwise max(mat, 0) (delegated to maximum_scalar). */
DLLEXP extern int apply_relu(cudamat* mat, cudamat* target, cudamat* dtarget) {
  unsigned int len = mat->size[0] * mat->size[1];
  if (!dtarget) return maximum_scalar(mat,0,target);
  if (!mat->on_device || !target->on_device || !dtarget->on_device) return ERROR_NOT_ON_DEVICE;
  if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat->size[0] != dtarget->size[0] || mat->size[1] != dtarget->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat->dtype != target->dtype || mat->dtype != dtarget->dtype) return ERROR_MISMATCHED_DTYPE;
  switch (mat->dtype) {
    case 0: kApplyReluD<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, (float*)dtarget->data_device, len); break;
    case 1: kApplyReluD<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, (doubl*)dtarget->data_device, len); break;
    default: return ERROR_DTYPE_UNSUPPORTED;
  }
  CUDA_THREAD_SYNC();
  if (checkCUDAError()) return CUDA_ERROR;
  return 0;
}

DLLEXP extern int apply_sigmoid(cudamat* mat, cudamat* target) {
  unsigned int len =
mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kApplySigmoid<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kApplySigmoid<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_sigmoid_deriv(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kApplySigmoidDeriv<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kApplySigmoidDeriv<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_tanh(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kApplyTanh<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, 
(float*)target->data_device, len); else if (mat->dtype == 1) kApplyTanh<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_tanh_deriv(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kApplyTanhDeriv<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kApplyTanhDeriv<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_abs(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kApplyAbs<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); if (mat->dtype == 1) kApplyAbs<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); /* do nothing for unsigned types */ CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * 
mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kApplyLog1PlusExp<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kApplyLog1PlusExp<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_log(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kLog<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kLog<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kExp<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if 
(mat->dtype == 1) kExp<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_sqrt(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kSqrt<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kSqrt<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else if (mat->dtype == 2) kSqrt<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int square(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kSquare<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kSquare<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else if (mat->dtype == 2) kSquare<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, 
(ubyte*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_pow(cudamat* mat, double pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kPow<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)pow, (float*)target->data_device, len); else if (mat->dtype == 1) kPow<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)pow, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype || mat->dtype != pow->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kPowMatrix<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)pow->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kPowMatrix<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)pow->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int reciprocal(cudamat* mat, cudamat* 
target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kReciprocal<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float*)target->data_device, len); else if (mat->dtype == 1) kReciprocal<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl*)target->data_device, len); else return ERROR_DTYPE_UNSUPPORTED; CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, double beta, double alpha) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->dtype != mat2->dtype || mat1->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat1->dtype >= 2 || mat2->dtype >= 2 || target->dtype >= 2) return ERROR_DTYPE_UNSUPPORTED; if (get_leading_dimension(mat1) != get_leading_dimension(target) || get_nonleading_dimension(mat2) != get_nonleading_dimension(target) || get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) { return ERROR_INCOMPATIBLE_DIMENSIONS; } int m = get_leading_dimension(mat1), k = get_leading_dimension(mat2), n = get_nonleading_dimension(mat2); if (mat1->dtype == 0) { cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2), m, n, k, (float)alpha, (const float*)mat1->data_device, mat1->size[0], (const float*)mat2->data_device, mat2->size[0], (float)beta, (float*)target->data_device, target->size[0]); } else { cublasDgemm(get_transpose_char(mat1), get_transpose_char(mat2), m, n, k, (doubl)alpha, (const doubl*)mat1->data_device, mat1->size[0], (const doubl*)mat2->data_device, mat2->size[0], (doubl)beta, (doubl*)target->data_device, target->size[0]); } if 
(check_cublas_error()) return CUBLAS_ERROR;
  return 0;
}

/* Dot product of mat1 and mat2 viewed as flat length-len vectors, computed
   with cuBLAS s/d dot.  All errors are reported through *err_code (0 on
   success); the return value is the dot product, or 0 / -1. when an error
   occurred. */
DLLEXP extern double vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
  int len = mat1->size[0]*mat1->size[1];
  double res = 0.;
  /* BUG FIX: the device, dtype-mismatch and unsupported-dtype checks used to
     `return ERROR_...;` directly, leaking the (negative) error constant as
     the "dot product" while leaving *err_code stale.  Report every error
     through *err_code, matching the other error paths of this function. */
  if (!mat1->on_device || !mat2->on_device) {
    *err_code = ERROR_NOT_ON_DEVICE;
    return 0;
  }
  if (mat1->is_trans != mat2->is_trans) {
    *err_code = ERROR_TRANSPOSEDNESS;
    return 0;
  }
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
    *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
    return 0;
  }
  if (mat1->dtype != mat2->dtype) {
    *err_code = ERROR_MISMATCHED_DTYPE;
    return 0;
  }
  if (mat1->dtype >= 2 || mat2->dtype >= 2) {
    *err_code = ERROR_DTYPE_UNSUPPORTED;
    return 0;
  }
  if (mat1->dtype == 0)
    res = cublasSdot(len, (const float*)mat1->data_device, 1, (const float*)mat2->data_device, 1);
  if (mat1->dtype == 1)
    res = cublasDdot(len, (const doubl*)mat1->data_device, 1, (const doubl*)mat2->data_device, 1);
  if (check_cublas_error()) {
    *err_code = CUBLAS_ERROR;
    return -1.;
  } else {
    *err_code = 0;
    return res;
  }
}

/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must have the same transposedness.
*/
DLLEXP extern int add_mult(cudamat* mat1, cudamat* mat2, double alpha) {
  int len = mat1->size[0]*mat1->size[1];
  if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != mat2->dtype) return ERROR_MISMATCHED_DTYPE;
  if (mat1->dtype >= 2 || mat2->dtype >= 2) return ERROR_DTYPE_UNSUPPORTED;
  /* In-place axpy on device memory: mat1 <- mat1 + alpha * mat2. */
  if (mat1->dtype == 0)
    cublasSaxpy(len, (float)alpha, (const float*)mat2->data_device, 1, (float*)mat1->data_device, 1);
  else
    cublasDaxpy(len, (doubl)alpha, (const doubl*)mat2->data_device, 1, (doubl*)mat1->data_device, 1);
  if (check_cublas_error()) return CUBLAS_ERROR;
  return 0;
}

/* target = mat1 + transpose(mat2); the dimension checks require mat1 and
   target to be shaped as mat2's transpose.  One COPY_BLOCK_SIZE-square tile
   of mat2 per thread block. */
DLLEXP extern int add_transpose(cudamat* mat1, cudamat* mat2, cudamat* target) {
  if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[1] || mat1->size[1] != mat2->size[0] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat1->dtype != target->dtype || mat2->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE;
  unsigned int height = mat2->size[0];
  unsigned int width = mat2->size[1];
  /* Round both grid dimensions up so partial tiles are covered. */
  unsigned int grid_x = (height + COPY_BLOCK_SIZE - 1) / COPY_BLOCK_SIZE;
  unsigned int grid_y = (width + COPY_BLOCK_SIZE - 1) / COPY_BLOCK_SIZE;
  dim3 grid(grid_x, grid_y, 1);
  dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
  if (mat1->dtype == 0) kAddTrans<float><<<grid,threads>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, height,width);
  if (mat1->dtype == 1) kAddTrans<doubl><<<grid,threads>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, height,width);
  if (mat1->dtype == 2)
kAddTrans<ubyte><<<grid,threads>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, height,width); if (mat1->dtype == 3) kAddTrans<unsig><<<grid,threads>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, height,width); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != target->dtype || mat2->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat1->dtype == 0) kAdd<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) kAdd<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len); if (mat1->dtype == 2) kAdd<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) kAdd<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return 
ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != target->dtype || mat2->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat1->dtype == 0) kSubtract<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) kSubtract<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len); if (mat1->dtype == 2) kSubtract<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) kSubtract<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int subtract_transpose(cudamat* mat1, cudamat* mat2, cudamat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[1] || mat1->size[1] != mat2->size[0] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != target->dtype || mat2->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; unsigned int height = mat2->size[0]; unsigned int width = mat2->size[1]; // setup execution parameters unsigned int grid_x = height / COPY_BLOCK_SIZE; if (height % COPY_BLOCK_SIZE) grid_x++; unsigned int grid_y = width / COPY_BLOCK_SIZE; if (width % COPY_BLOCK_SIZE) grid_y++; dim3 grid(grid_x, grid_y, 1); dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1); if 
(mat1->dtype == 0) kSubtractTrans<float><<<grid,threads>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, height,width); if (mat1->dtype == 1) kSubtractTrans<doubl><<<grid,threads>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, height,width); if (mat1->dtype == 2) kSubtractTrans<ubyte><<<grid,threads>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, height,width); if (mat1->dtype == 3) kSubtractTrans<unsig><<<grid,threads>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, height,width); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != target->dtype || mat2->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat1->dtype == 0) kDivide<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) kDivide<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len); if (mat1->dtype == 2) kDivide<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) kDivide<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, 
(unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } /* Elementwise multiplication of 2 matrices */ DLLEXP extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat1->dtype != target->dtype || mat2->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat1->dtype == 0) kMult<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat1->data_device, (float*)mat2->data_device, (float*)target->data_device, len); if (mat1->dtype == 1) kMult<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat1->data_device, (doubl*)mat2->data_device, (doubl*)target->data_device, len); if (mat1->dtype == 2) kMult<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat1->data_device, (ubyte*)mat2->data_device, (ubyte*)target->data_device, len); if (mat1->dtype == 3) kMult<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat1->data_device, (unsig*)mat2->data_device, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int assign_array(cudamat* src, cudamat* dst) { int len = src->size[0]*src->size[1]; if (!src->on_device || !dst->on_device) return ERROR_NOT_ON_DEVICE; if (src->size[0] != dst->size[0] || src->size[1] != dst->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (src->dtype == 0) { if (dst->dtype == 1) kAssignArray<float,doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)src->data_device, (doubl*)dst->data_device, len); if (dst->dtype == 2) 
kAssignArray<float,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)src->data_device, (ubyte*)dst->data_device, len); if (dst->dtype == 3) kAssignArray<float,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)src->data_device, (unsig*)dst->data_device, len); } else if (src->dtype == 1) { if (dst->dtype == 0) kAssignArray<doubl,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)src->data_device, (float*)dst->data_device, len); if (dst->dtype == 2) kAssignArray<doubl,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)src->data_device, (ubyte*)dst->data_device, len); if (dst->dtype == 3) kAssignArray<doubl,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)src->data_device, (unsig*)dst->data_device, len); } else if (src->dtype == 2) { if (dst->dtype == 0) kAssignArray<ubyte,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)src->data_device, (float*)dst->data_device, len); if (dst->dtype == 1) kAssignArray<ubyte,doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)src->data_device, (doubl*)dst->data_device, len); if (dst->dtype == 3) kAssignArray<ubyte,unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)src->data_device, (unsig*)dst->data_device, len); } else if (src->dtype == 3) { if (dst->dtype == 0) kAssignArray<unsig,float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)src->data_device, (float*)dst->data_device, len); if (dst->dtype == 1) kAssignArray<unsig,doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)src->data_device, (doubl*)dst->data_device, len); if (dst->dtype == 2) kAssignArray<unsig,ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)src->data_device, (ubyte*)dst->data_device, len); } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int assign_scalar(cudamat* mat, double alpha) { 
// Body of assign_scalar (signature on previous line): validates device
// residency, then launches kAssignScalar<T> per dtype to set all len =
// rows*cols elements of `mat` to `alpha` (alpha is narrowed/truncated by the
// C-style cast for integer dtypes).  Returns 0 on success, CUDA_ERROR on a
// kernel failure.
// mult_by_scalar(mat, alpha, target): target[i] = mat[i] * alpha, element-wise
// over the flattened matrix; requires matching shapes and dtypes.
// divide_by_scalar opens at the end of this line (body continues on the next
// source line).
int len = mat->size[0]*mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (mat->dtype == 0) kAssignScalar<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)alpha, len); if (mat->dtype == 1) kAssignScalar<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)alpha, len); if (mat->dtype == 2) kAssignScalar<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)alpha, len); if (mat->dtype == 3) kAssignScalar<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)alpha, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int mult_by_scalar(cudamat* mat, double alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kMultScalar<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)alpha, (float*)target->data_device, len); if (mat->dtype == 1) kMultScalar<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)alpha, (doubl*)target->data_device, len); if (mat->dtype == 2) kMultScalar<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)alpha, (ubyte*)target->data_device, len); if (mat->dtype == 3) kMultScalar<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)alpha, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int divide_by_scalar(cudamat* mat, double alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if
// Body of divide_by_scalar (signature on previous line): target[i] = mat[i] /
// alpha over the flattened matrix, dispatched by dtype via kDivideScalar<T>.
// Preconditions (shared with the other scalar ops): both operands on device,
// matching shapes, matching dtypes.
// NOTE(review): for integer dtypes alpha is cast before the kernel call, so
// e.g. alpha = 0.5 becomes 0 for ubyte/unsig -- a division by zero on the
// device; presumably callers only pass sensible alphas, verify at call sites.
// add_scalar(mat, alpha, target): target[i] = mat[i] + alpha; its final
// unsig dispatch continues on the next source line.
(!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kDivideScalar<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)alpha, (float*)target->data_device, len); if (mat->dtype == 1) kDivideScalar<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)alpha, (doubl*)target->data_device, len); if (mat->dtype == 2) kDivideScalar<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)alpha, (ubyte*)target->data_device, len); if (mat->dtype == 3) kDivideScalar<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)alpha, (unsig*)target->data_device, len); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } DLLEXP extern int add_scalar(cudamat* mat, double alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (mat->dtype == 0) kAddScalar<float><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((float*)mat->data_device, (float)alpha, (float*)target->data_device, len); if (mat->dtype == 1) kAddScalar<doubl><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((doubl*)mat->data_device, (doubl)alpha, (doubl*)target->data_device, len); if (mat->dtype == 2) kAddScalar<ubyte><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((ubyte*)mat->data_device, (ubyte)alpha, (ubyte*)target->data_device, len); if (mat->dtype == 3)
// Tail of add_scalar(): the unsig dispatch, then sync and error translation.
kAddScalar<unsig><<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>((unsig*)mat->data_device, (unsig)alpha, (unsig*)target->data_device, len);
CUDA_THREAD_SYNC();
if (checkCUDAError())
    return CUDA_ERROR;
return 0;
}

/* euclid_norm: Euclidean (L2) norm over all len = rows*cols elements of `mat`,
 * computed with cuBLAS (cublasSnrm2 for dtype 0 / float, cublasDnrm2 for
 * dtype 1 / doubl).  Integer dtypes (>= 2) are unsupported.
 * On success: *err_code = 0 and the norm is returned.
 * On a cuBLAS failure: *err_code = CUBLAS_ERROR and -1. is returned.
 * FIX: the on-device and dtype precondition checks now run BEFORE the cuBLAS
 * calls; previously cublasSnrm2/cublasDnrm2 dereferenced mat->data_device even
 * when the matrix was not resident on the device. */
DLLEXP extern double euclid_norm(cudamat* mat, int* err_code) {
    int len = mat->size[0]*mat->size[1];
    // Preconditions first -- never hand cuBLAS a pointer that is not on the
    // device (every sibling entry point in this file checks on_device first).
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    // NOTE(review): error constants are reported through the double return
    // value here without setting *err_code, matching the original contract;
    // callers must inspect the return value as well as *err_code.
    if (mat->dtype >= 2)
        return ERROR_DTYPE_UNSUPPORTED;
    double res;
    if (mat->dtype == 0)
        res = cublasSnrm2(len, (const float*)mat->data_device, 1);
    else
        res = cublasDnrm2(len, (const doubl*)mat->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}

/* selectRows: gathers the rows of `source` named by `indices` into `target`.
 * indices is a 1 x nRetRows float matrix (dtype 0) whose values are row
 * numbers; one warp-sized block column handles 32 rows. */
DLLEXP extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
    const int nRetRows = indices->size[1];
    if (source->dtype != target->dtype)
        return ERROR_MISMATCHED_DTYPE;
    if (indices->dtype != 0)
        return ERROR_DTYPE_UNSUPPORTED;
    if (nRetRows==0) return 0;
    dim3 gridDim((nRetRows+31)/32);
    dim3 blockDim(32);
    // TODO: support integer indices
    if (source->dtype == 0)
        kSelectRows<float><<<gridDim, blockDim>>>((float*)source->data_device, (float*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]);
    if (source->dtype == 1)
        kSelectRows<doubl><<<gridDim, blockDim>>>((doubl*)source->data_device, (doubl*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]);
    if (source->dtype == 2)
        kSelectRows<ubyte><<<gridDim, blockDim>>>((ubyte*)source->data_device, (ubyte*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]);
    if (source->dtype == 3)
        kSelectRows<unsig><<<gridDim, blockDim>>>((unsig*)source->data_device, (unsig*)target->data_device, (float*)indices->data_device, nRetRows, source->size[0], source->size[1]);
    CUDA_THREAD_SYNC();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}

// setSelectedRows: scatter counterpart of selectRows; parameter list continues
// on the next source line.
DLLEXP extern int setSelectedRows(cudamat* target, cudamat* source, cudamat*
// Body of setSelectedRows (signature on previous line): scatter counterpart of
// selectRows -- writes the rows of `source` into `target` at the row numbers
// given by `indices` (a 1 x nSetRows float matrix, dtype 0), dispatched by
// dtype via kSetSelectedRows<T> with one 32-thread block per 32 rows.
// dropout(rnd_state, matA, matB, rate, targetA, targetB) opens at the end of
// this line; this fragment is its validation prologue: matA/targetA must be on
// device with matching shape and dtype, and when matB is supplied it must
// match targetB and matA in shape/dtype and have a floating dtype (< 2).
// matB/targetB may be NULL, in which case matA/targetA stand in for them
// (see the `matB ? matB : matA` selections on the next source line).
indices){ const int nSetRows = indices->size[1]; if (source->dtype != target->dtype) return ERROR_MISMATCHED_DTYPE; if (indices->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; if (nSetRows==0) return 0; dim3 gridDim((nSetRows+31)/32); dim3 blockDim(32); if (source->dtype == 0) kSetSelectedRows<float><<<gridDim, blockDim>>>((float*)target->data_device, (float*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); if (source->dtype == 1) kSetSelectedRows<doubl><<<gridDim, blockDim>>>((doubl*)target->data_device, (doubl*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); if (source->dtype == 2) kSetSelectedRows<ubyte><<<gridDim, blockDim>>>((ubyte*)target->data_device, (ubyte*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); if (source->dtype == 3) kSetSelectedRows<unsig><<<gridDim, blockDim>>>((unsig*)target->data_device, (unsig*)source->data_device, (float*)indices->data_device, nSetRows, target->size[0], target->size[1]); CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } DLLEXP extern int dropout(rnd_struct* rnd_state, cudamat* matA, cudamat* matB, float rate, cudamat* targetA, cudamat* targetB) { unsigned int len = matA->size[0] * matA->size[1]; if (!matA->on_device || !targetA->on_device) return ERROR_NOT_ON_DEVICE; if (matA->size[0] != targetA->size[0] || matA->size[1] != targetA->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (matA->dtype != targetA->dtype) return ERROR_MISMATCHED_DTYPE; if (matB) { if (!matB->on_device || !targetB->on_device) return ERROR_NOT_ON_DEVICE; if (matB->dtype != targetB->dtype) return ERROR_MISMATCHED_DTYPE; if (matB->size[0] != targetB->size[0] || matB->size[1] != targetB->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (matA->size[0] != matB->size[0] || matA->size[1] != matB->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (matB->dtype >= 2) return
// Dispatch half of dropout (validation on the previous line): launches
// kDropout<TA,TB> with the shared RNG state (rnd_state->dev_mults /
// dev_words), applying dropout at probability `rate` to matA (and matB when
// supplied) into targetA (and targetB).  NULL matB/targetB fall back to
// matA/targetA via the `matB ? matB : matA` selections.  Pairings:
//   dtype 0 (float) -> <float,float>;  dtype 1 (doubl) -> <doubl,doubl>;
//   dtype 2 (ubyte) -> requires matB with dtype 0 or 1.
// NOTE(review): matA->dtype == 3 (unsig) matches no branch -- no kernel is
// launched and the function still returns 0, a silent no-op; confirm whether
// unsig dropout should instead return ERROR_DTYPE_UNSUPPORTED.
// The trailing brace closes an enclosing block (presumably extern "C") whose
// opening lies before this view.
ERROR_DTYPE_UNSUPPORTED; } if (matA->dtype == 0) { if (matB && matB->dtype != 0) return ERROR_DTYPE_UNSUPPORTED; kDropout<float,float><<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, (float*)matA->data_device, (float*)(matB ? matB : matA)->data_device, rate, (float*)targetA->data_device, (float*)(targetB ? targetB : targetA)->data_device, len); } else if (matA->dtype == 1) { if (matB && matB->dtype != 1) return ERROR_DTYPE_UNSUPPORTED; kDropout<doubl,doubl><<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, (doubl*)matA->data_device, (doubl*)(matB ? matB : matA)->data_device, rate, (doubl*)targetA->data_device, (doubl*)(targetB ? targetB : targetA)->data_device, len); } else if (matA->dtype == 2) { if (!matB) return ERROR_DTYPE_UNSUPPORTED; if (matB->dtype == 0) kDropout<ubyte,float><<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, (ubyte*)matA->data_device, (float*)(matB ? matB : matA)->data_device, rate, (ubyte*)targetA->data_device, (float*)(targetB ? targetB : targetA)->data_device, len); if (matB->dtype == 1) kDropout<ubyte,doubl><<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, (ubyte*)matA->data_device, (doubl*)(matB ? matB : matA)->data_device, rate, (ubyte*)targetA->data_device, (doubl*)(targetB ? targetB : targetA)->data_device, len); } CUDA_THREAD_SYNC(); if (checkCUDAError()) return CUDA_ERROR; return 0; } }